| code (string, length 82 to 54.1k) | code_codestyle (int64, 0 to 699) | style_context (string, length 111 to 35.6k) | style_context_codestyle (int64, 0 to 699) | label (int64, 0 or 1) |
|---|---|---|---|---|
'''simple docstring'''
from typing import Optional
import numpy as np
import torch
from torch import nn
from transformers import GPT2Config, GPT2LMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class _a (UpperCamelCase_, UpperCamelCase_, UpperCamelCase_ ):
'''simple docstring'''
_keys_to_ignore_on_load_unexpected = [r"""h\.\d+\.attn\.bias""", r"""h\.\d+\.attn\.masked_bias"""]
@register_to_config
def __init__( self ,__a ,__a ,__a = None ,__a = 50_257 ,__a = 1_024 ,__a = 768 ,__a = 12 ,__a = 12 ,__a = None ,__a = "gelu_new" ,__a = 0.1 ,__a = 0.1 ,__a = 0.1 ,__a = 1E-5 ,__a = 0.02 ,__a = True ,__a = True ,__a = False ,__a = False ,) -> Optional[int]:
super().__init__()
snake_case : List[Any] = prefix_length
if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
raise ValueError(
F'''`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_inner_dim} and'''
F''' `n_embd`: {n_embd} are not equal.''' )
snake_case : Union[str, Any] = prefix_inner_dim
snake_case : str = prefix_hidden_dim
snake_case : str = (
nn.Linear(self.prefix_inner_dim ,self.prefix_hidden_dim )
if self.prefix_hidden_dim is not None
else nn.Identity()
)
snake_case : List[Any] = (
nn.Linear(self.prefix_hidden_dim ,a_ ) if self.prefix_hidden_dim is not None else nn.Identity()
)
snake_case : List[str] = GPT2Config(
vocab_size=a_ ,n_positions=a_ ,n_embd=a_ ,n_layer=a_ ,n_head=a_ ,n_inner=a_ ,activation_function=a_ ,resid_pdrop=a_ ,embd_pdrop=a_ ,attn_pdrop=a_ ,layer_norm_epsilon=a_ ,initializer_range=a_ ,scale_attn_weights=a_ ,use_cache=a_ ,scale_attn_by_inverse_layer_idx=a_ ,reorder_and_upcast_attn=a_ ,)
snake_case : Optional[Any] = GPT2LMHeadModel(a_ )
def snake_case_ ( self ,__a ,__a ,__a = None ,__a = None ,) -> str:
snake_case : Tuple = self.transformer.transformer.wte(a_ )
snake_case : List[str] = self.encode_prefix(a_ )
snake_case : Optional[Any] = self.decode_prefix(a_ )
snake_case : Any = torch.cat((prefix_embeds, embedding_text) ,dim=1 )
if labels is not None:
snake_case : Optional[Any] = self.get_dummy_token(input_ids.shape[0] ,input_ids.device )
snake_case : Tuple = torch.cat((dummy_token, input_ids) ,dim=1 )
snake_case : Any = self.transformer(inputs_embeds=a_ ,labels=a_ ,attention_mask=a_ )
if self.prefix_hidden_dim is not None:
return out, hidden
else:
return out
def snake_case_ ( self ,__a ,__a ) -> torch.Tensor:
return torch.zeros(a_ ,self.prefix_length ,dtype=torch.int64 ,device=a_ )
def snake_case_ ( self ,__a ) -> Any:
return self.encode_prefix(a_ )
@torch.no_grad()
def snake_case_ ( self ,__a ,__a ,__a ) -> Optional[int]:
snake_case : Any = torch.split(a_ ,1 ,dim=0 )
snake_case : int = []
snake_case : Dict = []
for feature in features:
snake_case : Dict = self.decode_prefix(feature.to(a_ ) ) # back to the clip feature
# Only support beam search for now
snake_case : Optional[Any] = self.generate_beam(
input_embeds=a_ ,device=a_ ,eos_token_id=a_ )
generated_tokens.append(output_tokens[0] )
generated_seq_lengths.append(seq_lengths[0] )
snake_case : Optional[int] = torch.stack(a_ )
snake_case : Optional[int] = torch.stack(a_ )
return generated_tokens, generated_seq_lengths
@torch.no_grad()
def snake_case_ ( self ,__a=None ,__a=None ,__a=None ,__a = 5 ,__a = 67 ,__a = 1.0 ,__a = None ,) -> List[str]:
snake_case : List[str] = eos_token_id
snake_case : Tuple = None
snake_case : Optional[int] = None
snake_case : List[str] = torch.ones(a_ ,device=a_ ,dtype=torch.int )
snake_case : List[str] = torch.zeros(a_ ,device=a_ ,dtype=torch.bool )
if input_embeds is not None:
snake_case : Dict = input_embeds
else:
snake_case : Union[str, Any] = self.transformer.transformer.wte(a_ )
for i in range(a_ ):
snake_case : Dict = self.transformer(inputs_embeds=a_ )
snake_case : int = outputs.logits
snake_case : Optional[int] = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
snake_case : Tuple = logits.softmax(-1 ).log()
if scores is None:
snake_case : List[Any] = logits.topk(a_ ,-1 )
snake_case : List[str] = generated.expand(a_ ,*generated.shape[1:] )
snake_case : Optional[Any] = next_tokens.permute(1 ,0 ), scores.squeeze(0 )
if tokens is None:
snake_case : Dict = next_tokens
else:
snake_case : Dict = tokens.expand(a_ ,*tokens.shape[1:] )
snake_case : List[Any] = torch.cat((tokens, next_tokens) ,dim=1 )
else:
snake_case : List[str] = -float(np.inf )
snake_case : Any = 0
snake_case : Any = scores[:, None] + logits
seq_lengths[~is_stopped] += 1
snake_case : Dict = scores_sum / seq_lengths[:, None]
snake_case : Union[str, Any] = scores_sum_average.view(-1 ).topk(a_ ,-1 )
snake_case : str = next_tokens // scores_sum.shape[1]
snake_case : str = seq_lengths[next_tokens_source]
snake_case : Union[str, Any] = next_tokens % scores_sum.shape[1]
snake_case : Any = next_tokens.unsqueeze(1 )
snake_case : int = tokens[next_tokens_source]
snake_case : List[str] = torch.cat((tokens, next_tokens) ,dim=1 )
snake_case : Any = generated[next_tokens_source]
snake_case : List[str] = scores_sum_average * seq_lengths
snake_case : int = is_stopped[next_tokens_source]
snake_case : int = self.transformer.transformer.wte(next_tokens.squeeze() ).view(generated.shape[0] ,1 ,-1 )
snake_case : List[Any] = torch.cat((generated, next_token_embed) ,dim=1 )
snake_case : Tuple = is_stopped + next_tokens.eq(a_ ).squeeze()
if is_stopped.all():
break
snake_case : Dict = scores / seq_lengths
snake_case : List[Any] = scores.argsort(descending=a_ )
# tokens tensors are already padded to max_seq_length
snake_case : Optional[Any] = [tokens[i] for i in order]
snake_case : Any = torch.stack(a_ ,dim=0 )
snake_case : Union[str, Any] = torch.tensor([seq_lengths[i] for i in order] ,dtype=seq_lengths.dtype )
return output_texts, seq_lengths
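The `generate_beam` method above ranks beams by average log-probability: the cumulative score divided by the generated length, then `argsort(descending=True)`. A minimal, self-contained sketch of that scoring step (the numbers are made up for illustration):

```python
import torch

# Hypothetical cumulative log-probs for three beams and their lengths.
scores_sum = torch.tensor([-2.0, -2.5, -4.5])
seq_lengths = torch.tensor([2.0, 3.0, 5.0])

# Length-normalized score, mirroring `scores / seq_lengths` above.
avg_scores = scores_sum / seq_lengths
order = avg_scores.argsort(descending=True)
print(order)  # tensor([1, 2, 0]): the beam with the best average log-prob first
```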
| 116 |
import logging
import numpy as np
import pytest
from scipy.linalg import eigh
logging.basicConfig(level=logging.INFO, format="%(message)s")
def column_reshape(input_array: np.ndarray) -> np.ndarray:
    """Reshape a row NumPy array into a column NumPy array."""
    return input_array.reshape((input_array.size, 1))


def covariance_within_classes(features: np.ndarray, labels: np.ndarray, classes: int) -> np.ndarray:
    """Compute the covariance matrix inside each class."""
    covariance_sum = np.nan
    for i in range(classes):
        data = features[:, labels == i]
        data_mean = data.mean(1)
        # Centralize the data of class i
        centered_data = data - column_reshape(data_mean)
        if i > 0:
            # If covariance_sum is not None
            covariance_sum += np.dot(centered_data, centered_data.T)
        else:
            # If covariance_sum is np.nan (i.e. first loop)
            covariance_sum = np.dot(centered_data, centered_data.T)
    return covariance_sum / features.shape[1]


def covariance_between_classes(features: np.ndarray, labels: np.ndarray, classes: int) -> np.ndarray:
    """Compute the covariance matrix between multiple classes."""
    general_data_mean = features.mean(1)
    covariance_sum = np.nan
    for i in range(classes):
        data = features[:, labels == i]
        device_data = data.shape[1]
        data_mean = data.mean(1)
        if i > 0:
            # If covariance_sum is not None
            covariance_sum += device_data * np.dot(
                column_reshape(data_mean) - column_reshape(general_data_mean),
                (column_reshape(data_mean) - column_reshape(general_data_mean)).T,
            )
        else:
            # If covariance_sum is np.nan (i.e. first loop)
            covariance_sum = device_data * np.dot(
                column_reshape(data_mean) - column_reshape(general_data_mean),
                (column_reshape(data_mean) - column_reshape(general_data_mean)).T,
            )
    return covariance_sum / features.shape[1]


def principal_component_analysis(features: np.ndarray, dimensions: int) -> np.ndarray:
    """Project the dataset onto its `dimensions` leading principal components."""
    # Check if the features have been loaded
    if features.any():
        data_mean = features.mean(1)
        # Center the dataset
        centered_data = features - np.reshape(data_mean, (data_mean.size, 1))
        covariance_matrix = np.dot(centered_data, centered_data.T) / features.shape[1]
        eigenvalues, eigenvectors = np.linalg.eigh(covariance_matrix)
        # Take all the columns in the reverse order (-1), and then takes only the first
        filtered_eigenvectors = eigenvectors[:, ::-1][:, 0:dimensions]
        # Project the database on the new space
        projected_data = np.dot(filtered_eigenvectors.T, features)
        logging.info('Principal Component Analysis computed')
        return projected_data
    else:
        logging.basicConfig(level=logging.ERROR, format='%(message)s', force=True)
        logging.error('Dataset empty')
        raise AssertionError


def linear_discriminant_analysis(
    features: np.ndarray, labels: np.ndarray, classes: int, dimensions: int
) -> np.ndarray:
    """Project the dataset onto `dimensions` discriminant directions (LDA)."""
    # Check if the dimension desired is less than the number of classes
    assert classes > dimensions
    # Check if features have been already loaded
    if features.any():
        _, eigenvectors = eigh(
            covariance_between_classes(features, labels, classes),
            covariance_within_classes(features, labels, classes),
        )
        filtered_eigenvectors = eigenvectors[:, ::-1][:, :dimensions]
        svd_matrix, _, _ = np.linalg.svd(filtered_eigenvectors)
        filtered_svd_matrix = svd_matrix[:, 0:dimensions]
        projected_data = np.dot(filtered_svd_matrix.T, features)
        logging.info('Linear Discriminant Analysis computed')
        return projected_data
    else:
        logging.basicConfig(level=logging.ERROR, format='%(message)s', force=True)
        logging.error('Dataset empty')
        raise AssertionError


def test_linear_discriminant_analysis() -> None:
    # Create dummy dataset with 2 classes and 3 features
    features = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]])
    labels = np.array([0, 0, 0, 1, 1])
    classes = 2
    dimensions = 2
    # Assert that the function raises an AssertionError if dimensions > classes
    with pytest.raises(AssertionError) as error_info:
        projected_data = linear_discriminant_analysis(features, labels, classes, dimensions)
        if isinstance(projected_data, np.ndarray):
            raise AssertionError('Did not raise AssertionError for dimensions > classes')
    assert error_info.type is AssertionError


def test_principal_component_analysis() -> None:
    features = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
    dimensions = 2
    expected_output = np.array([[6.92820323, 8.66025404, 10.39230485], [3.0, 3.0, 3.0]])
    with pytest.raises(AssertionError) as error_info:
        output = principal_component_analysis(features, dimensions)
        if not np.allclose(expected_output, output):
            raise AssertionError
    assert error_info.type is AssertionError
if __name__ == "__main__":
import doctest
doctest.testmod()
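For reference, the core of `principal_component_analysis` above is: center the data, form the covariance matrix, take the leading eigenvectors, and project. A standalone NumPy sketch on toy data (not a call into the functions above):

```python
import numpy as np

features = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]])  # features x samples
dimensions = 2

centered = features - features.mean(axis=1, keepdims=True)
covariance = centered @ centered.T / features.shape[1]
eigenvalues, eigenvectors = np.linalg.eigh(covariance)  # eigenvalues ascending
projection = eigenvectors[:, ::-1][:, :dimensions].T @ features
print(projection.shape)  # (2, 3): 3 samples reduced to 2 components
```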
| 85 | 0 |
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class SCREAMING_SNAKE_CASE__ (UpperCamelCase_ ):
lowercase_ : Optional[Any] = "Speech2TextFeatureExtractor"
lowercase_ : int = "Speech2TextTokenizer"
def __init__( self : Optional[Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : int ):
"""simple docstring"""
super().__init__(a_ , a_ )
lowerCAmelCase__ = self.feature_extractor
lowerCAmelCase__ = False
def __call__( self : Union[str, Any] , *__lowerCamelCase : Union[str, Any] , **__lowerCamelCase : Optional[int] ):
"""simple docstring"""
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*a_ , **a_ )
if "raw_speech" in kwargs:
warnings.warn('''Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.''' )
lowerCAmelCase__ = kwargs.pop('''raw_speech''' )
else:
lowerCAmelCase__ = kwargs.pop('''audio''' , a_ )
lowerCAmelCase__ = kwargs.pop('''sampling_rate''' , a_ )
lowerCAmelCase__ = kwargs.pop('''text''' , a_ )
if len(a_ ) > 0:
lowerCAmelCase__ = args[0]
lowerCAmelCase__ = args[1:]
if audio is None and text is None:
raise ValueError('''You need to specify either an `audio` or `text` input to process.''' )
if audio is not None:
lowerCAmelCase__ = self.feature_extractor(a_ , *a_ , sampling_rate=a_ , **a_ )
if text is not None:
lowerCAmelCase__ = self.tokenizer(a_ , **a_ )
if text is None:
return inputs
elif audio is None:
return encodings
else:
lowerCAmelCase__ = encodings['input_ids']
return inputs
def A__ ( self : Any , *__lowerCamelCase : Tuple , **__lowerCamelCase : Any ):
"""simple docstring"""
return self.tokenizer.batch_decode(*a_ , **a_ )
def A__ ( self : Optional[int] , *__lowerCamelCase : Tuple , **__lowerCamelCase : Optional[int] ):
"""simple docstring"""
return self.tokenizer.decode(*a_ , **a_ )
@contextmanager
def A__ ( self : Any ):
"""simple docstring"""
warnings.warn(
'''`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '''
'''labels by using the argument `text` of the regular `__call__` method (either in the same call as '''
'''your audio inputs, or in a separate call.''' )
lowerCAmelCase__ = True
lowerCAmelCase__ = self.tokenizer
yield
lowerCAmelCase__ = self.feature_extractor
lowerCAmelCase__ = False
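In practice such a processor is loaded from a pretrained checkpoint and routes audio to the feature extractor and text to the tokenizer. A hedged usage sketch; the checkpoint name is one public example, substitute your own:

```python
import numpy as np
from transformers import Speech2TextProcessor

processor = Speech2TextProcessor.from_pretrained("facebook/s2t-small-librispeech-asr")

speech = np.zeros(16_000, dtype=np.float32)  # one second of silence at 16 kHz
inputs = processor(audio=speech, sampling_rate=16_000, return_tensors="pt")
labels = processor(text="hello world", return_tensors="pt")
```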
| 615 |
import argparse
import logging
from collections import namedtuple
import torch
from model_bertabs import BertAbsSummarizer
from models.model_builder import AbsSummarizer # The authors' implementation
from transformers import BertTokenizer
logging.basicConfig(level=logging.INFO)
SCREAMING_SNAKE_CASE__ : Optional[int] = logging.getLogger(__name__)
SCREAMING_SNAKE_CASE__ : List[Any] = "Hello world! cécé herlolip"
SCREAMING_SNAKE_CASE__ : Dict = namedtuple(
"BertAbsConfig",
[
"temp_dir",
"large",
"use_bert_emb",
"finetune_bert",
"encoder",
"share_emb",
"max_pos",
"enc_layers",
"enc_hidden_size",
"enc_heads",
"enc_ff_size",
"enc_dropout",
"dec_layers",
"dec_hidden_size",
"dec_heads",
"dec_ff_size",
"dec_dropout",
],
)
def _a ( lowercase__ : List[str] , lowercase__ : List[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[Any] = BertAbsConfig(
temp_dir='.' , finetune_bert=lowercase__ , large=lowercase__ , share_emb=lowercase__ , use_bert_emb=lowercase__ , encoder='bert' , max_pos=5_12 , enc_layers=6 , enc_hidden_size=5_12 , enc_heads=8 , enc_ff_size=5_12 , enc_dropout=0.2 , dec_layers=6 , dec_hidden_size=7_68 , dec_heads=8 , dec_ff_size=20_48 , dec_dropout=0.2 , )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.load(lowercase__ , lambda lowercase__ , lowercase__ : storage )
SCREAMING_SNAKE_CASE__ : Any = AbsSummarizer(lowercase__ , torch.device('cpu' ) , lowercase__ )
original.eval()
SCREAMING_SNAKE_CASE__ : List[Any] = BertAbsSummarizer(lowercase__ , torch.device('cpu' ) )
new_model.eval()
# -------------------
# Convert the weights
# -------------------
logging.info('convert the model' )
new_model.bert.load_state_dict(original.bert.state_dict() )
new_model.decoder.load_state_dict(original.decoder.state_dict() )
new_model.generator.load_state_dict(original.generator.state_dict() )
# ----------------------------------
# Make sure the outputs are identical
# ----------------------------------
logging.info('Make sure that the models\' outputs are identical' )
SCREAMING_SNAKE_CASE__ : Any = BertTokenizer.from_pretrained('bert-base-uncased' )
# prepare the model inputs
SCREAMING_SNAKE_CASE__ : Optional[Any] = tokenizer.encode('This is sample éàalj\'-.' )
encoder_input_ids.extend([tokenizer.pad_token_id] * (5_12 - len(lowercase__ )) )
SCREAMING_SNAKE_CASE__ : Optional[int] = torch.tensor(lowercase__ ).unsqueeze(0 )
SCREAMING_SNAKE_CASE__ : List[str] = tokenizer.encode('This is sample 3 éàalj\'-.' )
decoder_input_ids.extend([tokenizer.pad_token_id] * (5_12 - len(lowercase__ )) )
SCREAMING_SNAKE_CASE__ : List[str] = torch.tensor(lowercase__ ).unsqueeze(0 )
# failsafe to make sure the weights reset does not affect the
# loaded weights.
assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight ) ) == 0
# forward pass
SCREAMING_SNAKE_CASE__ : int = encoder_input_ids
SCREAMING_SNAKE_CASE__ : Any = decoder_input_ids
SCREAMING_SNAKE_CASE__ : Union[str, Any] = None
SCREAMING_SNAKE_CASE__ : Dict = None
SCREAMING_SNAKE_CASE__ : str = None
SCREAMING_SNAKE_CASE__ : List[str] = None
SCREAMING_SNAKE_CASE__ : Optional[Any] = None
# The original model does not apply the generator layer immediately but rather in
# the beam search (where it combines softmax + linear layer). Since we already
# apply the softmax in our generation process we only apply the linear layer here.
# We make sure that the outputs of the full stack are identical
SCREAMING_SNAKE_CASE__ : Optional[Any] = original(lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ )[0]
SCREAMING_SNAKE_CASE__ : Optional[int] = original.generator(lowercase__ )
SCREAMING_SNAKE_CASE__ : Tuple = new_model(
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ )[0]
SCREAMING_SNAKE_CASE__ : List[Any] = new_model.generator(lowercase__ )
SCREAMING_SNAKE_CASE__ : Tuple = torch.max(torch.abs(output_converted_model - output_original_model ) ).item()
print('Maximum absolute difference between outputs: {:.2f}'.format(lowercase__ ) )
SCREAMING_SNAKE_CASE__ : Optional[int] = torch.max(torch.abs(output_converted_generator - output_original_generator ) ).item()
print('Maximum absolute difference between generator outputs: {:.2f}'.format(lowercase__ ) )
SCREAMING_SNAKE_CASE__ : List[Any] = torch.allclose(lowercase__ , lowercase__ , atol=1E-3 )
if are_identical:
logging.info('all weights are equal up to 1e-3' )
else:
raise ValueError('the weights are different. The new model is likely different from the original one.' )
# The model has been saved with torch.save(model) and this is bound to the exact
# directory structure. We save the state_dict instead.
logging.info('saving the model\'s state dictionary' )
torch.save(
new_model.state_dict() , './bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin' )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : Tuple = argparse.ArgumentParser()
parser.add_argument(
"--bertabs_checkpoint_path",
default=None,
type=str,
required=True,
help="Path the official PyTorch dump.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
required=True,
help="Path to the output PyTorch model.",
)
SCREAMING_SNAKE_CASE__ : Tuple = parser.parse_args()
convert_bertabs_checkpoints(
args.bertabs_checkpoint_path,
args.pytorch_dump_folder_path,
)
| 85 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"configuration_layoutlmv2": ["LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "LayoutLMv2Config"],
"processing_layoutlmv2": ["LayoutLMv2Processor"],
"tokenization_layoutlmv2": ["LayoutLMv2Tokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a = ["LayoutLMv2TokenizerFast"]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a = ["LayoutLMv2FeatureExtractor"]
a = ["LayoutLMv2ImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_layoutlmv2"] = [
"LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST",
"LayoutLMv2ForQuestionAnswering",
"LayoutLMv2ForSequenceClassification",
"LayoutLMv2ForTokenClassification",
"LayoutLMv2Layer",
"LayoutLMv2Model",
"LayoutLMv2PreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_layoutlmv2 import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMv2Config
from .processing_layoutlmv2 import LayoutLMv2Processor
from .tokenization_layoutlmv2 import LayoutLMv2Tokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutlmv2_fast import LayoutLMv2TokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_layoutlmv2 import LayoutLMv2FeatureExtractor
from .image_processing_layoutlmv2 import LayoutLMv2ImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_layoutlmv2 import (
LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST,
LayoutLMv2ForQuestionAnswering,
LayoutLMv2ForSequenceClassification,
LayoutLMv2ForTokenClassification,
LayoutLMv2Layer,
LayoutLMv2Model,
LayoutLMv2PreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
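`_LazyModule` defers the heavy submodule imports until an attribute is first accessed. A minimal sketch of the underlying idea using a module-level `__getattr__` (PEP 562); `heavy_submodule` and `HeavyClass` are hypothetical names, and this is not the actual `_LazyModule` implementation:

```python
# lazy_pkg/__init__.py
import importlib

_import_structure = {"heavy_submodule": ["HeavyClass"]}

def __getattr__(name):
    # Import the owning submodule only when one of its exports is requested.
    for module_name, exported in _import_structure.items():
        if name in exported:
            module = importlib.import_module(f".{module_name}", __name__)
            return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
```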
| 7 |
from __future__ import annotations
import inspect
import unittest
from typing import List, Tuple
from transformers import RegNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFRegNetForImageClassification, TFRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class snake_case :
def __init__( self : Tuple , a_ : int , a_ : Optional[int]=3 , a_ : Tuple=32 , a_ : Any=3 , a_ : Tuple=10 , a_ : Optional[int]=[10, 20, 30, 40] , a_ : List[Any]=[1, 1, 2, 1] , a_ : int=True , a_ : Optional[Any]=True , a_ : Any="relu" , a_ : int=3 , a_ : List[Any]=None , )-> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = parent
SCREAMING_SNAKE_CASE__ : Optional[int] = batch_size
SCREAMING_SNAKE_CASE__ : int = image_size
SCREAMING_SNAKE_CASE__ : Tuple = num_channels
SCREAMING_SNAKE_CASE__ : Tuple = embeddings_size
SCREAMING_SNAKE_CASE__ : str = hidden_sizes
SCREAMING_SNAKE_CASE__ : Optional[int] = depths
SCREAMING_SNAKE_CASE__ : Optional[Any] = is_training
SCREAMING_SNAKE_CASE__ : Union[str, Any] = use_labels
SCREAMING_SNAKE_CASE__ : Dict = hidden_act
SCREAMING_SNAKE_CASE__ : Tuple = num_labels
SCREAMING_SNAKE_CASE__ : List[Any] = scope
SCREAMING_SNAKE_CASE__ : str = len(a_ )
def __lowercase( self : Union[str, Any] )-> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE__ : Any = None
if self.use_labels:
SCREAMING_SNAKE_CASE__ : Any = ids_tensor([self.batch_size] , self.num_labels )
SCREAMING_SNAKE_CASE__ : Tuple = self.get_config()
return config, pixel_values, labels
def __lowercase( self : str )-> str:
"""simple docstring"""
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , )
def __lowercase( self : List[str] , a_ : int , a_ : Any , a_ : Optional[Any] )-> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = TFRegNetModel(config=a_ )
SCREAMING_SNAKE_CASE__ : Optional[Any] = model(a_ , training=a_ )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def __lowercase( self : Union[str, Any] , a_ : Dict , a_ : int , a_ : Optional[Any] )-> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = self.num_labels
SCREAMING_SNAKE_CASE__ : Tuple = TFRegNetForImageClassification(a_ )
SCREAMING_SNAKE_CASE__ : List[Any] = model(a_ , labels=a_ , training=a_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __lowercase( self : List[str] )-> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[Any] = config_and_inputs
SCREAMING_SNAKE_CASE__ : Optional[Any] = {'pixel_values': pixel_values}
return config, inputs_dict
@require_tf
class snake_case ( UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ):
lowercase_ = (TFRegNetModel, TFRegNetForImageClassification) if is_tf_available() else ()
lowercase_ = (
{'feature-extraction': TFRegNetModel, 'image-classification': TFRegNetForImageClassification}
if is_tf_available()
else {}
)
lowercase_ = False
lowercase_ = False
lowercase_ = False
lowercase_ = False
lowercase_ = False
def __lowercase( self : int )-> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = TFRegNetModelTester(self )
SCREAMING_SNAKE_CASE__ : int = ConfigTester(self , config_class=a_ , has_text_modality=a_ )
def __lowercase( self : List[Any] )-> Tuple:
"""simple docstring"""
return
@unittest.skip(reason='RegNet does not use inputs_embeds' )
def __lowercase( self : str )-> Optional[int]:
"""simple docstring"""
pass
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices('GPU' ) ) == 0 , reason='TF does not support backprop for grouped convolutions on CPU.' , )
@slow
def __lowercase( self : Any )-> List[Any]:
"""simple docstring"""
super().test_keras_fit()
@unittest.skip(reason='RegNet does not support input and output embeddings' )
def __lowercase( self : Any )-> List[Any]:
"""simple docstring"""
pass
def __lowercase( self : Tuple )-> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ : Optional[int] = model_class(a_ )
SCREAMING_SNAKE_CASE__ : Optional[Any] = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE__ : List[Any] = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE__ : Optional[int] = ['pixel_values']
self.assertListEqual(arg_names[:1] , a_ )
def __lowercase( self : str )-> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a_ )
def __lowercase( self : List[Any] )-> Optional[Any]:
"""simple docstring"""
def check_hidden_states_output(a_ : int , a_ : Union[str, Any] , a_ : Tuple ):
SCREAMING_SNAKE_CASE__ : Any = model_class(a_ )
SCREAMING_SNAKE_CASE__ : Optional[Any] = model(**self._prepare_for_class(a_ , a_ ) , training=a_ )
SCREAMING_SNAKE_CASE__ : List[Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.model_tester.num_stages
self.assertEqual(len(a_ ) , expected_num_stages + 1 )
# RegNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : int = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE__ : Dict = ['basic', 'bottleneck']
for model_class in self.all_model_classes:
for layer_type in layers_type:
SCREAMING_SNAKE_CASE__ : List[Any] = layer_type
SCREAMING_SNAKE_CASE__ : Union[str, Any] = True
check_hidden_states_output(a_ , a_ , a_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE__ : int = True
check_hidden_states_output(a_ , a_ , a_ )
def __lowercase( self : Optional[int] )-> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : str = self.model_tester.prepare_config_and_inputs_for_common()
def check_equivalence(a_ : str , a_ : Tuple , a_ : Optional[int] , a_ : Union[str, Any]={} ):
SCREAMING_SNAKE_CASE__ : int = model(a_ , return_dict=a_ , **a_ )
SCREAMING_SNAKE_CASE__ : str = model(a_ , return_dict=a_ , **a_ ).to_tuple()
def recursive_check(a_ : List[Any] , a_ : int ):
if isinstance(a_ , (List, Tuple) ):
for tuple_iterable_value, dict_iterable_value in zip(a_ , a_ ):
recursive_check(a_ , a_ )
elif tuple_object is None:
return
else:
self.assertTrue(
all(tf.equal(a_ , a_ ) ) , msg=(
'Tuple and dict output are not equal. Difference:'
F''' {tf.math.reduce_max(tf.abs(tuple_object - dict_object ) )}'''
) , )
recursive_check(a_ , a_ )
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ : Optional[int] = model_class(a_ )
SCREAMING_SNAKE_CASE__ : int = self._prepare_for_class(a_ , a_ )
SCREAMING_SNAKE_CASE__ : Dict = self._prepare_for_class(a_ , a_ )
check_equivalence(a_ , a_ , a_ )
SCREAMING_SNAKE_CASE__ : List[str] = self._prepare_for_class(a_ , a_ , return_labels=a_ )
SCREAMING_SNAKE_CASE__ : Optional[int] = self._prepare_for_class(a_ , a_ , return_labels=a_ )
check_equivalence(a_ , a_ , a_ )
SCREAMING_SNAKE_CASE__ : str = self._prepare_for_class(a_ , a_ )
SCREAMING_SNAKE_CASE__ : List[str] = self._prepare_for_class(a_ , a_ )
check_equivalence(a_ , a_ , a_ , {'output_hidden_states': True} )
SCREAMING_SNAKE_CASE__ : int = self._prepare_for_class(a_ , a_ , return_labels=a_ )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self._prepare_for_class(a_ , a_ , return_labels=a_ )
check_equivalence(a_ , a_ , a_ , {'output_hidden_states': True} )
def __lowercase( self : str )-> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*a_ )
@slow
def __lowercase( self : Any )-> List[str]:
"""simple docstring"""
for model_name in TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE__ : Optional[int] = TFRegNetModel.from_pretrained(a_ )
self.assertIsNotNone(a_ )
def _a ( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Dict = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_tf
@require_vision
class snake_case ( unittest.TestCase ):
@cached_property
def __lowercase( self : List[Any] )-> int:
"""simple docstring"""
return (
AutoImageProcessor.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def __lowercase( self : Any )-> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = TFRegNetForImageClassification.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
SCREAMING_SNAKE_CASE__ : List[Any] = self.default_image_processor
SCREAMING_SNAKE_CASE__ : Any = prepare_img()
SCREAMING_SNAKE_CASE__ : str = image_processor(images=a_ , return_tensors='tf' )
# forward pass
SCREAMING_SNAKE_CASE__ : Tuple = model(**a_ , training=a_ )
# verify the logits
SCREAMING_SNAKE_CASE__ : Optional[int] = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , a_ )
SCREAMING_SNAKE_CASE__ : Any = tf.constant([-0.4180, -1.5051, -3.4836] )
tf.debugging.assert_near(outputs.logits[0, :3] , a_ , atol=1e-4 )
| 85 | 0 |
_lowerCamelCase : Union[str, Any] = {
"Pillow": "Pillow",
"accelerate": "accelerate>=0.11.0",
"compel": "compel==0.1.8",
"black": "black~=23.1",
"datasets": "datasets",
"filelock": "filelock",
"flax": "flax>=0.4.1",
"hf-doc-builder": "hf-doc-builder>=0.3.0",
"huggingface-hub": "huggingface-hub>=0.13.2",
"requests-mock": "requests-mock==1.10.0",
"importlib_metadata": "importlib_metadata",
"invisible-watermark": "invisible-watermark",
"isort": "isort>=5.5.4",
"jax": "jax>=0.2.8,!=0.3.2",
"jaxlib": "jaxlib>=0.1.65",
"Jinja2": "Jinja2",
"k-diffusion": "k-diffusion>=0.0.12",
"torchsde": "torchsde",
"note_seq": "note_seq",
"librosa": "librosa",
"numpy": "numpy",
"omegaconf": "omegaconf",
"parameterized": "parameterized",
"protobuf": "protobuf>=3.20.3,<4",
"pytest": "pytest",
"pytest-timeout": "pytest-timeout",
"pytest-xdist": "pytest-xdist",
"ruff": "ruff>=0.0.241",
"safetensors": "safetensors",
"sentencepiece": "sentencepiece>=0.1.91,!=0.1.92",
"scipy": "scipy",
"onnx": "onnx",
"regex": "regex!=2019.12.17",
"requests": "requests",
"tensorboard": "tensorboard",
"torch": "torch>=1.4",
"torchvision": "torchvision",
"transformers": "transformers>=4.25.1",
"urllib3": "urllib3<=2.0.0",
}
| 663 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE__ : Optional[Any] = {"configuration_fnet": ["FNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FNetConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ : List[Any] = ["FNetTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ : List[str] = ["FNetTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_fnet"] = [
"FNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"FNetForMaskedLM",
"FNetForMultipleChoice",
"FNetForNextSentencePrediction",
"FNetForPreTraining",
"FNetForQuestionAnswering",
"FNetForSequenceClassification",
"FNetForTokenClassification",
"FNetLayer",
"FNetModel",
"FNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet import FNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet_fast import FNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_fnet import (
FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FNetForMaskedLM,
FNetForMultipleChoice,
FNetForNextSentencePrediction,
FNetForPreTraining,
FNetForQuestionAnswering,
FNetForSequenceClassification,
FNetForTokenClassification,
FNetLayer,
FNetModel,
FNetPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 85 | 0 |
"""simple docstring"""
from typing import Dict, List, Optional, Tuple, Union
import torch
from ...models import AutoencoderKL, Transformer2DModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class a ( UpperCamelCase_ ):
def __init__( self : Optional[int] , __SCREAMING_SNAKE_CASE : Transformer2DModel , __SCREAMING_SNAKE_CASE : AutoencoderKL , __SCREAMING_SNAKE_CASE : KarrasDiffusionSchedulers , __SCREAMING_SNAKE_CASE : Optional[Dict[int, str]] = None , ) -> Union[str, Any]:
super().__init__()
self.register_modules(transformer=a_ , vae=a_ , scheduler=a_ )
# create a imagenet -> id dictionary for easier use
lowerCamelCase_ = {}
if idalabel is not None:
for key, value in idalabel.items():
for label in value.split(',' ):
lowerCamelCase_ = int(a_ )
lowerCamelCase_ = dict(sorted(self.labels.items() ) )
def UpperCamelCase ( self : Dict , __SCREAMING_SNAKE_CASE : Union[str, List[str]] ) -> List[int]:
if not isinstance(a_ , a_ ):
lowerCamelCase_ = list(a_ )
for l in label:
if l not in self.labels:
raise ValueError(
F'''{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}.''' )
return [self.labels[l] for l in label]
@torch.no_grad()
def __call__( self : Optional[Any] , __SCREAMING_SNAKE_CASE : List[int] , __SCREAMING_SNAKE_CASE : float = 4.0 , __SCREAMING_SNAKE_CASE : Optional[Union[torch.Generator, List[torch.Generator]]] = None , __SCREAMING_SNAKE_CASE : int = 50 , __SCREAMING_SNAKE_CASE : Optional[str] = "pil" , __SCREAMING_SNAKE_CASE : bool = True , ) -> Union[ImagePipelineOutput, Tuple]:
lowerCamelCase_ = len(a_ )
lowerCamelCase_ = self.transformer.config.sample_size
lowerCamelCase_ = self.transformer.config.in_channels
lowerCamelCase_ = randn_tensor(
shape=(batch_size, latent_channels, latent_size, latent_size) , generator=a_ , device=self.device , dtype=self.transformer.dtype , )
lowerCamelCase_ = torch.cat([latents] * 2 ) if guidance_scale > 1 else latents
lowerCamelCase_ = torch.tensor(a_ , device=self.device ).reshape(-1 )
lowerCamelCase_ = torch.tensor([1000] * batch_size , device=self.device )
lowerCamelCase_ = torch.cat([class_labels, class_null] , 0 ) if guidance_scale > 1 else class_labels
# set step values
self.scheduler.set_timesteps(a_ )
for t in self.progress_bar(self.scheduler.timesteps ):
if guidance_scale > 1:
lowerCamelCase_ = latent_model_input[: len(a_ ) // 2]
lowerCamelCase_ = torch.cat([half, half] , dim=0 )
lowerCamelCase_ = self.scheduler.scale_model_input(a_ , a_ )
lowerCamelCase_ = t
if not torch.is_tensor(a_ ):
# TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
# This would be a good case for the `match` statement (Python 3.10+)
lowerCamelCase_ = latent_model_input.device.type == 'mps'
if isinstance(a_ , a_ ):
lowerCamelCase_ = torch.float32 if is_mps else torch.float64
else:
lowerCamelCase_ = torch.int32 if is_mps else torch.int64
lowerCamelCase_ = torch.tensor([timesteps] , dtype=a_ , device=latent_model_input.device )
elif len(timesteps.shape ) == 0:
lowerCamelCase_ = timesteps[None].to(latent_model_input.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
lowerCamelCase_ = timesteps.expand(latent_model_input.shape[0] )
# predict noise model_output
lowerCamelCase_ = self.transformer(
a_ , timestep=a_ , class_labels=a_ ).sample
# perform guidance
if guidance_scale > 1:
lowerCamelCase_ = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:]
lowerCamelCase_ = torch.split(a_ , len(a_ ) // 2 , dim=0 )
lowerCamelCase_ = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
lowerCamelCase_ = torch.cat([half_eps, half_eps] , dim=0 )
lowerCamelCase_ = torch.cat([eps, rest] , dim=1 )
# learned sigma
if self.transformer.config.out_channels // 2 == latent_channels:
lowerCamelCase_ = torch.split(a_ , a_ , dim=1 )
else:
lowerCamelCase_ = noise_pred
# compute previous image: x_t -> x_t-1
lowerCamelCase_ = self.scheduler.step(a_ , a_ , a_ ).prev_sample
if guidance_scale > 1:
lowerCamelCase_ = latent_model_input.chunk(2 , dim=0 )
else:
lowerCamelCase_ = latent_model_input
lowerCamelCase_ = 1 / self.vae.config.scaling_factor * latents
lowerCamelCase_ = self.vae.decode(a_ ).sample
lowerCamelCase_ = (samples / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
lowerCamelCase_ = samples.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
lowerCamelCase_ = self.numpy_to_pil(a_ )
if not return_dict:
return (samples,)
return ImagePipelineOutput(images=a_ )
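The guidance step in the loop above is plain tensor arithmetic: the batch is doubled so one forward pass yields conditional and unconditional noise predictions, which are then combined as `uncond + scale * (cond - uncond)`. A toy, self-contained illustration of just that arithmetic:

```python
import torch

guidance_scale = 4.0
cond_eps = torch.randn(1, 4, 8, 8)    # stand-in for the class-conditional prediction
uncond_eps = torch.randn(1, 4, 8, 8)  # stand-in for the null-class prediction

# Classifier-free guidance: extrapolate away from the unconditional prediction.
guided_eps = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
print(guided_eps.shape)  # torch.Size([1, 4, 8, 8])
```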
| 549 |
def naive_cut_rod_recursive(n: int, prices: list):
    """Exponential-time, naive recursive solution to the rod-cutting problem."""
    _enforce_args(n, prices)
    if n == 0:
        return 0
    max_revue = float('-inf')
    for i in range(1, n + 1):
        max_revue = max(max_revue, prices[i - 1] + naive_cut_rod_recursive(n - i, prices))
    return max_revue


def top_down_cut_rod(n: int, prices: list):
    """Memoized (top-down dynamic programming) solution."""
    _enforce_args(n, prices)
    max_rev = [float('-inf') for _ in range(n + 1)]
    return _top_down_cut_rod_recursive(n, prices, max_rev)


def _top_down_cut_rod_recursive(n: int, prices: list, max_rev: list):
    if max_rev[n] >= 0:
        return max_rev[n]
    elif n == 0:
        return 0
    else:
        max_revenue = float('-inf')
        for i in range(1, n + 1):
            max_revenue = max(
                max_revenue,
                prices[i - 1] + _top_down_cut_rod_recursive(n - i, prices, max_rev),
            )
        max_rev[n] = max_revenue
    return max_rev[n]


def bottom_up_cut_rod(n: int, prices: list):
    """Iterative (bottom-up dynamic programming) solution."""
    _enforce_args(n, prices)
    # length(max_rev) = n + 1, to accommodate for the revenue obtainable from a rod of
    # length 0.
    max_rev = [float('-inf') for _ in range(n + 1)]
    max_rev[0] = 0
    for i in range(1, n + 1):
        max_revenue_i = max_rev[i]
        for j in range(1, i + 1):
            max_revenue_i = max(max_revenue_i, prices[j - 1] + max_rev[i - j])
        max_rev[i] = max_revenue_i
    return max_rev[n]


def _enforce_args(n: int, prices: list):
    if n < 0:
        raise ValueError(f'''n must be greater than or equal to 0. Got n = {n}''')
    if n > len(prices):
        raise ValueError(
            'Each integral piece of rod must have a corresponding price. '
            f'''Got n = {n} but length of prices = {len(prices)}'''
        )


def main():
    prices = [6, 10, 12, 15, 20, 23]
    n = len(prices)
    # the best revenue comes from cutting the rod into 6 pieces, each
    # of length 1 resulting in a revenue of 6 * 6 = 36.
    expected_max_revenue = 36
    max_rev_top_down = top_down_cut_rod(n, prices)
    max_rev_bottom_up = bottom_up_cut_rod(n, prices)
    max_rev_naive = naive_cut_rod_recursive(n, prices)
    assert expected_max_revenue == max_rev_top_down
    assert max_rev_top_down == max_rev_bottom_up
    assert max_rev_bottom_up == max_rev_naive
if __name__ == "__main__":
    main()
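A quick usage example for the three implementations, using the classic CLRS price table; all three agree:

```python
prices = [1, 5, 8, 9]  # price of a piece of length 1..4
print(bottom_up_cut_rod(4, prices))        # 10 (two pieces of length 2)
print(top_down_cut_rod(4, prices))         # 10
print(naive_cut_rod_recursive(4, prices))  # 10
```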
| 85 | 0 |
"""simple docstring"""
lowerCamelCase_ = "\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"
lowerCamelCase_ = [{"type": "code", "content": INSTALL_CONTENT}]
lowerCamelCase_ = {
"{processor_class}": "FakeProcessorClass",
"{model_class}": "FakeModelClass",
"{object_class}": "FakeObjectClass",
}
| 498 |
import unittest
from transformers import CamembertTokenizer, CamembertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
SCREAMING_SNAKE_CASE__ : Union[str, Any] = get_tests_dir("fixtures/test_sentencepiece.model")
SCREAMING_SNAKE_CASE__ : Optional[int] = get_tests_dir("fixtures/test_sentencepiece_bpe.model")
SCREAMING_SNAKE_CASE__ : Any = "pt" if is_torch_available() else "tf"
@require_sentencepiece
@require_tokenizers
class snake_case ( UpperCamelCase_ , unittest.TestCase ):
lowercase_ = CamembertTokenizer
lowercase_ = CamembertTokenizerFast
lowercase_ = True
lowercase_ = True
def __lowercase( self : Tuple )-> str:
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
SCREAMING_SNAKE_CASE__ : Dict = CamembertTokenizer(a_ )
tokenizer.save_pretrained(self.tmpdirname )
def __lowercase( self : Any )-> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = '<pad>'
SCREAMING_SNAKE_CASE__ : int = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(a_ ) , a_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(a_ ) , a_ )
def __lowercase( self : Optional[Any] )-> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Any = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<s>NOTUSED' )
self.assertEqual(vocab_keys[1] , '<pad>' )
self.assertEqual(vocab_keys[-1] , '<mask>' )
self.assertEqual(len(a_ ) , 1004 )
def __lowercase( self : Union[str, Any] )-> Optional[Any]:
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 1005 )
def __lowercase( self : List[Any] )-> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = CamembertTokenizer(a_ )
tokenizer.save_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE__ : int = CamembertTokenizerFast.from_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE__ : str = 'I was born in 92000, and this is falsé.'
SCREAMING_SNAKE_CASE__ : Tuple = tokenizer.encode(a_ )
SCREAMING_SNAKE_CASE__ : Optional[Any] = rust_tokenizer.encode(a_ )
self.assertListEqual(a_ , a_ )
SCREAMING_SNAKE_CASE__ : str = tokenizer.encode(a_ , add_special_tokens=a_ )
SCREAMING_SNAKE_CASE__ : List[str] = rust_tokenizer.encode(a_ , add_special_tokens=a_ )
self.assertListEqual(a_ , a_ )
# <unk> tokens are not the same for `rust` than for `slow`.
# Because spm gives back raw token instead of `unk` in EncodeAsPieces
# tokens = tokenizer.tokenize(sequence)
SCREAMING_SNAKE_CASE__ : List[str] = tokenizer.convert_ids_to_tokens(a_ )
SCREAMING_SNAKE_CASE__ : List[Any] = rust_tokenizer.tokenize(a_ )
self.assertListEqual(a_ , a_ )
def __lowercase( self : Union[str, Any] )-> str:
"""simple docstring"""
if not self.test_rust_tokenizer:
return
SCREAMING_SNAKE_CASE__ : Optional[int] = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.get_rust_tokenizer()
SCREAMING_SNAKE_CASE__ : Tuple = 'I was born in 92000, and this is falsé.'
SCREAMING_SNAKE_CASE__ : str = tokenizer.tokenize(a_ )
SCREAMING_SNAKE_CASE__ : List[Any] = rust_tokenizer.tokenize(a_ )
self.assertListEqual(a_ , a_ )
SCREAMING_SNAKE_CASE__ : Optional[int] = tokenizer.encode(a_ , add_special_tokens=a_ )
SCREAMING_SNAKE_CASE__ : Optional[int] = rust_tokenizer.encode(a_ , add_special_tokens=a_ )
self.assertListEqual(a_ , a_ )
SCREAMING_SNAKE_CASE__ : int = self.get_rust_tokenizer()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = tokenizer.encode(a_ )
SCREAMING_SNAKE_CASE__ : Tuple = rust_tokenizer.encode(a_ )
self.assertListEqual(a_ , a_ )
@slow
def __lowercase( self : List[str] )-> Dict:
"""simple docstring"""
# fmt: off
SCREAMING_SNAKE_CASE__ : Union[str, Any] = {'input_ids': [[5, 54, 7196, 297, 30, 23, 776, 18, 11, 3215, 3705, 8252, 22, 3164, 1181, 2116, 29, 16, 813, 25, 791, 3314, 20, 3446, 38, 2_7575, 120, 6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [5, 468, 17, 11, 9088, 20, 1517, 8, 2_2804, 1_8818, 10, 38, 629, 607, 607, 142, 19, 7196, 867, 56, 1_0326, 24, 2267, 20, 416, 5072, 1_5612, 233, 734, 7, 2399, 27, 16, 3015, 1649, 7, 24, 20, 4338, 2399, 27, 13, 3400, 14, 13, 6189, 8, 930, 9, 6]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# camembert is a french model. So we also use french texts.
SCREAMING_SNAKE_CASE__ : str = [
'Le transformeur est un modèle d\'apprentissage profond introduit en 2017, '
'utilisé principalement dans le domaine du traitement automatique des langues (TAL).',
'À l\'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus '
'pour gérer des données séquentielles, telles que le langage naturel, pour des tâches '
'telles que la traduction et la synthèse de texte.',
]
self.tokenizer_integration_test_util(
expected_encoding=a_ , model_name='camembert-base' , revision='3a0641d9a1aeb7e848a74299e7e4c4bca216b4cf' , sequences=a_ , )
| 85 | 0 |
'''simple docstring'''
from ...processing_utils import ProcessorMixin
class snake_case__ ( UpperCamelCase_ ):
A__ = ['''image_processor''', '''feature_extractor''']
A__ = '''TvltImageProcessor'''
A__ = '''TvltFeatureExtractor'''
def __init__( self : int , __a : List[str] , __a : Dict ) -> str:
'''simple docstring'''
super().__init__(image_processor=a_ , feature_extractor=a_ )
__snake_case : Optional[Any] = image_processor
__snake_case : Any = feature_extractor
def __call__( self : Optional[Any] , __a : int=None , __a : str=None , __a : int=None , __a : Tuple=None , __a : Tuple=False , __a : Dict=False , *__a : Tuple , **__a : Union[str, Any] , ) -> List[str]:
'''simple docstring'''
if images is None and audio is None:
raise ValueError('You need to specify either an `images` or `audio` input to process.' )
__snake_case : Optional[Any] = None
if images is not None:
__snake_case : Tuple = self.image_processor(a_ , mask_pixel=a_ , *a_ , **a_ )
if images_mixed is not None:
__snake_case : List[str] = self.image_processor(a_ , is_mixed=a_ , *a_ , **a_ )
if audio is not None:
__snake_case : Tuple = self.feature_extractor(
a_ , *a_ , sampling_rate=a_ , mask_audio=a_ , **a_ )
__snake_case : Optional[Any] = {}
if audio is not None:
output_dict.update(a_ )
if images is not None:
output_dict.update(a_ )
if images_mixed_dict is not None:
output_dict.update(a_ )
return output_dict
@property
def A_ ( self : Dict ) -> Optional[int]:
'''simple docstring'''
__snake_case : Union[str, Any] = self.image_processor.model_input_names
__snake_case : List[str] = self.feature_extractor.model_input_names
return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names ) )
| 286 |
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable
SCREAMING_SNAKE_CASE__ : Any = {"configuration_dpt": ["DPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DPTConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ : List[str] = ["DPTFeatureExtractor"]
SCREAMING_SNAKE_CASE__ : Tuple = ["DPTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_dpt"] = [
"DPT_PRETRAINED_MODEL_ARCHIVE_LIST",
"DPTForDepthEstimation",
"DPTForSemanticSegmentation",
"DPTModel",
"DPTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_dpt import DPTFeatureExtractor
from .image_processing_dpt import DPTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dpt import (
DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
DPTForDepthEstimation,
DPTForSemanticSegmentation,
DPTModel,
DPTPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 85 | 0 |
'''simple docstring'''
from __future__ import annotations
def kmp(pattern: str, text: str) -> bool:
    '''
    The Knuth-Morris-Pratt algorithm: find a pattern in a piece of text in O(n + m).
    1) Preprocess the pattern to find any suffixes that are identical to prefixes;
       this tells us where to continue from after a mismatch.
    2) Step through the text one character at a time, comparing against the pattern.
    '''
    # 1) Construct the failure array
    failure = get_failure_array(pattern)
    # 2) Step through text searching for pattern
    i, j = 0, 0  # index into text, pattern
    while i < len(text):
        if pattern[j] == text[i]:
            if j == (len(pattern) - 1):
                return True
            j += 1
        # if this is a prefix in our pattern
        # just go back far enough to continue
        elif j > 0:
            j = failure[j - 1]
            continue
        i += 1
    return False


def get_failure_array(pattern: str) -> list[int]:
    '''Calculates the new index we should go to if we fail a comparison.'''
    failure = [0]
    i = 0
    j = 1
    while j < len(pattern):
        if pattern[i] == pattern[j]:
            i += 1
        elif i > 0:
            i = failure[i - 1]
            continue
        j += 1
        failure.append(i)
    return failure


if __name__ == "__main__":
    # Test 1)
    pattern = "abc1abc12"
    text1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text2 = "alskfjaldsk23adsfabcabc"
    assert kmp(pattern, text1) and not kmp(pattern, text2)
    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert kmp(pattern, text)
    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert kmp(pattern, text)
    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert kmp(pattern, text)
    # Test 5)
    pattern = "aabaabaaa"
    assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
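Inspecting the failure array directly makes the jump logic easier to follow; after a mismatch at pattern index `j`, the search resumes at `failure[j - 1]` instead of restarting:

```python
print(get_failure_array("ABABX"))  # [0, 0, 1, 2, 0]
# e.g. after matching "ABAB" and failing on "X", we resume comparing at index 2.
```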
| 275 |
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
SCREAMING_SNAKE_CASE__ : List[Any] = logging.get_logger(__name__)
class snake_case ( UpperCamelCase_ ):
lowercase_ = ['pixel_values']
def __init__( self : List[Any] , a_ : bool = True , a_ : Union[int, float] = 1 / 255 , a_ : bool = True , a_ : int = 8 , **a_ : Union[str, Any] , )-> None:
"""simple docstring"""
super().__init__(**a_ )
SCREAMING_SNAKE_CASE__ : List[str] = do_rescale
SCREAMING_SNAKE_CASE__ : Union[str, Any] = rescale_factor
SCREAMING_SNAKE_CASE__ : Dict = do_pad
SCREAMING_SNAKE_CASE__ : Any = pad_size
def __lowercase( self : str , a_ : np.ndarray , a_ : float , a_ : Optional[Union[str, ChannelDimension]] = None , **a_ : str )-> np.ndarray:
"""simple docstring"""
return rescale(a_ , scale=a_ , data_format=a_ , **a_ )
def __lowercase( self : Any , a_ : np.ndarray , a_ : int , a_ : Optional[Union[str, ChannelDimension]] = None )-> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : str = get_image_size(a_ )
SCREAMING_SNAKE_CASE__ : Tuple = (old_height // size + 1) * size - old_height
SCREAMING_SNAKE_CASE__ : List[Any] = (old_width // size + 1) * size - old_width
return pad(a_ , ((0, pad_height), (0, pad_width)) , mode='symmetric' , data_format=a_ )
def __lowercase( self : Tuple , a_ : ImageInput , a_ : Optional[bool] = None , a_ : Optional[float] = None , a_ : Optional[bool] = None , a_ : Optional[int] = None , a_ : Optional[Union[str, TensorType]] = None , a_ : Union[str, ChannelDimension] = ChannelDimension.FIRST , **a_ : Dict , )-> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = do_rescale if do_rescale is not None else self.do_rescale
SCREAMING_SNAKE_CASE__ : Tuple = rescale_factor if rescale_factor is not None else self.rescale_factor
SCREAMING_SNAKE_CASE__ : List[str] = do_pad if do_pad is not None else self.do_pad
SCREAMING_SNAKE_CASE__ : List[str] = pad_size if pad_size is not None else self.pad_size
SCREAMING_SNAKE_CASE__ : Tuple = make_list_of_images(a_ )
if not valid_images(a_ ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
# All transformations expect numpy arrays.
SCREAMING_SNAKE_CASE__ : List[str] = [to_numpy_array(a_ ) for image in images]
if do_rescale:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [self.rescale(image=a_ , scale=a_ ) for image in images]
if do_pad:
SCREAMING_SNAKE_CASE__ : str = [self.pad(a_ , size=a_ ) for image in images]
SCREAMING_SNAKE_CASE__ : List[str] = [to_channel_dimension_format(a_ , a_ ) for image in images]
SCREAMING_SNAKE_CASE__ : Tuple = {'pixel_values': images}
return BatchFeature(data=a_ , tensor_type=a_ )
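The pad step rounds each spatial dimension up to the next multiple of `size` using symmetric (mirror) padding; note that the formula adds a full extra `size` when a dimension is already a multiple. A standalone NumPy illustration of the same arithmetic:

```python
import numpy as np

image = np.arange(5 * 6, dtype=float).reshape(5, 6)
size = 8
pad_h = (image.shape[0] // size + 1) * size - image.shape[0]
pad_w = (image.shape[1] // size + 1) * size - image.shape[1]
padded = np.pad(image, ((0, pad_h), (0, pad_w)), mode="symmetric")
print(padded.shape)  # (8, 8)
```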
| 85 | 0 |
import argparse
import torch
from torch import nn
from transformers import MaMaaaConfig, MaMaaaForConditionalGeneration
def _snake_case( SCREAMING_SNAKE_CASE__ ) -> Any:
lowercase : Dict = [
'encoder.version',
'decoder.version',
'model.encoder.version',
'model.decoder.version',
'decoder.output_projection.weight',
'_float_tensor',
'encoder.embed_positions._float_tensor',
'decoder.embed_positions._float_tensor',
]
for k in ignore_keys:
state_dict.pop(lowercase__ , lowercase__ )
def _snake_case( SCREAMING_SNAKE_CASE__ ) -> Dict:
lowercase : str = emb.weight.shape
lowercase : str = nn.Linear(lowercase__ , lowercase__ , bias=lowercase__ )
lowercase : Tuple = emb.weight.data
return lin_layer
def _snake_case( SCREAMING_SNAKE_CASE__ ) -> Dict:
lowercase : List[str] = torch.load(lowercase__ , map_location="""cpu""" )
lowercase : Optional[Any] = mam_aaa['args'] or mam_aaa['cfg']['model']
lowercase : Tuple = mam_aaa['model']
remove_ignore_keys_(lowercase__ )
lowercase : List[str] = state_dict['encoder.embed_tokens.weight'].shape[0]
lowercase : str = MaMaaaConfig(
vocab_size=lowercase__ , max_position_embeddings=1_024 , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , encoder_layerdrop=args.encoder_layerdrop , decoder_layerdrop=args.decoder_layerdrop , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function="""relu""" , )
lowercase : Optional[int] = state_dict['decoder.embed_tokens.weight']
lowercase : int = MaMaaaForConditionalGeneration(lowercase__ )
model.model.load_state_dict(lowercase__ , strict=lowercase__ )
lowercase : str = make_linear_from_emb(model.model.shared )
return model
if __name__ == "__main__":
lowercase : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""fairseq_path""", type=str, help="""path to a model.pt on local filesystem.""")
parser.add_argument("""pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
lowercase : Any = parser.parse_args()
lowercase : Union[str, Any] = convert_fairseq_mamaaa_checkpoint_from_disk(args.fairseq_path)
model.save_pretrained(args.pytorch_dump_folder_path)
| 336 | from pathlib import Path
import numpy as np
from PIL import Image
def _a ( lowercase__ : np.ndarray ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[Any] = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2]
return 0.2989 * r + 0.5870 * g + 0.1140 * b
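# Note (added; standard fact): 0.2989 / 0.5870 / 0.1140 are the ITU-R BT.601
# luma weights, so this grayscale conversion matches PIL's "L" mode up to rounding.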
def _a ( lowercase__ : np.ndarray ):
'''simple docstring'''
return (gray > 1_27) & (gray <= 2_55)
def _a ( lowercase__ : np.ndarray , lowercase__ : np.ndarray ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[Any] = np.zeros_like(lowercase__ )
SCREAMING_SNAKE_CASE__ : str = np.zeros(
(image.shape[0] + kernel.shape[0] - 1, image.shape[1] + kernel.shape[1] - 1) )
# Copy image to padded image
SCREAMING_SNAKE_CASE__ : Optional[Any] = image
# Iterate over image & apply kernel
for x in range(image.shape[1] ):
for y in range(image.shape[0] ):
SCREAMING_SNAKE_CASE__ : List[Any] = (
kernel * image_padded[y : y + kernel.shape[0], x : x + kernel.shape[1]]
).sum()
SCREAMING_SNAKE_CASE__ : List[str] = int(summation > 0 )
return output
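# -- Hedged example (added): a self-contained restatement of the dilation above
# for quick experimentation; the padding offsets mirror the original and assume
# a 3x3 kernel, as in the demo below.
def _dilate_sketch(image: np.ndarray, kernel: np.ndarray) -> np.ndarray:
    kh, kw = kernel.shape
    padded = np.zeros((image.shape[0] + kh - 1, image.shape[1] + kw - 1))
    padded[kh - 2 : kh - 2 + image.shape[0], kw - 2 : kw - 2 + image.shape[1]] = image
    out = np.zeros_like(image)
    for y in range(image.shape[0]):
        for x in range(image.shape[1]):
            # a pixel turns on when the kernel overlaps any foreground pixel
            out[y, x] = int((kernel * padded[y : y + kh, x : x + kw]).sum() > 0)
    return out

# Dilating a single centered pixel with a cross kernel lights up the cross:
_dot = np.zeros((3, 3))
_dot[1, 1] = 1
_cross = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
assert (_dilate_sketch(_dot, _cross) == _cross).all()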
if __name__ == "__main__":
# read original image
SCREAMING_SNAKE_CASE__ : int = Path(__file__).resolve().parent / "image_data" / "lena.jpg"
SCREAMING_SNAKE_CASE__ : int = np.array(Image.open(lena_path))
# kernel to be applied
SCREAMING_SNAKE_CASE__ : str = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
SCREAMING_SNAKE_CASE__ : Optional[int] = dilation(gray_to_binary(rgb_to_gray(lena)), structuring_element)
# Save the output image
SCREAMING_SNAKE_CASE__ : Optional[int] = Image.fromarray(output).convert("RGB")
pil_img.save("result_dilation.png")
| 85 | 0 |
"""simple docstring"""
import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def lowercase (_snake_case ,_snake_case ,_snake_case ) -> List[Any]:
'''simple docstring'''
__UpperCamelCase = 1.5
__UpperCamelCase = int(factor * num_class_images )
__UpperCamelCase = ClipClient(
url="https://knn.laion.ai/knn-service" ,indice_name="laion_400m" ,num_images=lowercase__ ,aesthetic_weight=0.1 )
os.makedirs(f"""{class_data_dir}/images""" ,exist_ok=lowercase__ )
if len(list(Path(f"""{class_data_dir}/images""" ).iterdir() ) ) >= num_class_images:
return
while True:
__UpperCamelCase = client.query(text=lowercase__ )
if len(lowercase__ ) >= factor * num_class_images or num_images > 1e4:
break
else:
__UpperCamelCase = int(factor * num_images )
__UpperCamelCase = ClipClient(
url="https://knn.laion.ai/knn-service" ,indice_name="laion_400m" ,num_images=lowercase__ ,aesthetic_weight=0.1 ,)
__UpperCamelCase = 0
__UpperCamelCase = 0
__UpperCamelCase = tqdm(desc="downloading real regularization images" ,total=lowercase__ )
with open(f"""{class_data_dir}/caption.txt""" ,"w" ) as fa, open(f"""{class_data_dir}/urls.txt""" ,"w" ) as fa, open(
f"""{class_data_dir}/images.txt""" ,"w" ) as fa:
while total < num_class_images:
__UpperCamelCase = class_images[count]
count += 1
try:
__UpperCamelCase = requests.get(images["url"] )
if img.status_code == 200:
__UpperCamelCase = Image.open(BytesIO(img.content ) )
with open(f"""{class_data_dir}/images/{total}.jpg""" ,"wb" ) as f:
f.write(img.content )
fa.write(images["caption"] + "\n" )
fa.write(images["url"] + "\n" )
fa.write(f"""{class_data_dir}/images/{total}.jpg""" + "\n" )
total += 1
pbar.update(1 )
else:
continue
except Exception:
continue
return
def lowercase () -> Any:
'''simple docstring'''
__UpperCamelCase = argparse.ArgumentParser("" ,add_help=lowercase__ )
parser.add_argument("--class_prompt" ,help="text prompt to retrieve images" ,required=lowercase__ ,type=lowercase__ )
parser.add_argument("--class_data_dir" ,help="path to save images" ,required=lowercase__ ,type=lowercase__ )
parser.add_argument("--num_class_images" ,help="number of images to download" ,default=200 ,type=lowercase__ )
return parser.parse_args()
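# -- Hedged usage (added): flag names come from the parser above; the script
# filename is hypothetical.
#   python retrieve_real_images.py --class_prompt "a photo of a dog" \
#       --class_data_dir ./real_reg --num_class_images 200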
if __name__ == "__main__":
_A = parse_args()
retrieve(args.class_prompt, args.class_data_dir, args.num_class_images) | 505 | def _a ( lowercase__ : int = 60_08_51_47_51_43 ):
'''simple docstring'''
try:
SCREAMING_SNAKE_CASE__ : Dict = int(lowercase__ )
except (TypeError, ValueError):
raise TypeError('Parameter n must be int or castable to int.' )
if n <= 0:
raise ValueError('Parameter n must be greater than or equal to one.' )
SCREAMING_SNAKE_CASE__ : int = 2
SCREAMING_SNAKE_CASE__ : int = 0
if n == 2:
return 2
while n > 2:
while n % i != 0:
i += 1
SCREAMING_SNAKE_CASE__ : str = i
while n % i == 0:
SCREAMING_SNAKE_CASE__ : List[Any] = n // i
i += 1
return int(lowercase__ )
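# -- Hedged worked example (added; values computed by hand): for n = 13195 the
# loop strips the factors 5, 7 and 13 and returns 29. With the default
# n = 600851475143 the result is 6857 (Project Euler problem 3).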
if __name__ == "__main__":
print(F"""{solution() = }""")
| 85 | 0 |
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class UpperCamelCase_ ( UpperCamelCase_ ):
_a : Optional[Any] = ['image_processor', 'tokenizer']
_a : int = 'BridgeTowerImageProcessor'
_a : List[str] = ('RobertaTokenizer', 'RobertaTokenizerFast')
def __init__( self : Dict , lowerCamelCase : str , lowerCamelCase : Tuple ):
super().__init__(a_ , a_ )
def __call__( self : List[Any] , lowerCamelCase : str , lowerCamelCase : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , lowerCamelCase : bool = True , lowerCamelCase : Union[bool, str, PaddingStrategy] = False , lowerCamelCase : Union[bool, str, TruncationStrategy] = None , lowerCamelCase : Optional[int] = None , lowerCamelCase : int = 0 , lowerCamelCase : Optional[int] = None , lowerCamelCase : Optional[bool] = None , lowerCamelCase : Optional[bool] = None , lowerCamelCase : bool = False , lowerCamelCase : bool = False , lowerCamelCase : bool = False , lowerCamelCase : bool = False , lowerCamelCase : bool = True , lowerCamelCase : Optional[Union[str, TensorType]] = None , **lowerCamelCase : List[Any] , ):
lowerCamelCase_ : Optional[int] = self.tokenizer(
text=a_ , add_special_tokens=a_ , padding=a_ , truncation=a_ , max_length=a_ , stride=a_ , pad_to_multiple_of=a_ , return_token_type_ids=a_ , return_attention_mask=a_ , return_overflowing_tokens=a_ , return_special_tokens_mask=a_ , return_offsets_mapping=a_ , return_length=a_ , verbose=a_ , return_tensors=a_ , **a_ , )
# add pixel_values + pixel_mask
lowerCamelCase_ : Union[str, Any] = self.image_processor(
a_ , return_tensors=a_ , do_normalize=a_ , do_center_crop=a_ , **a_ )
encoding.update(a_ )
return encoding
def __a ( self : Dict , *lowerCamelCase : Any , **lowerCamelCase : int ):
return self.tokenizer.batch_decode(*a_ , **a_ )
def __a ( self : str , *lowerCamelCase : int , **lowerCamelCase : str ):
return self.tokenizer.decode(*a_ , **a_ )
@property
def __a ( self : Optional[int] ):
lowerCamelCase_ : Tuple = self.tokenizer.model_input_names
lowerCamelCase_ : str = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
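# -- Hedged note (added): dict.fromkeys deduplicates while preserving insertion
# order, so model_input_names lists the tokenizer's names first, followed by
# any image-processor names not already present.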
| 364 | def _a ( lowercase__ : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[Any] = int(lowercase__ )
if n_element < 1:
SCREAMING_SNAKE_CASE__ : Tuple = ValueError('a should be a positive number' )
raise my_error
SCREAMING_SNAKE_CASE__ : Any = [1]
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : str = (0, 0, 0)
SCREAMING_SNAKE_CASE__ : Any = 1
while index < n_element:
while hamming_list[i] * 2 <= hamming_list[-1]:
i += 1
while hamming_list[j] * 3 <= hamming_list[-1]:
j += 1
while hamming_list[k] * 5 <= hamming_list[-1]:
k += 1
hamming_list.append(
min(hamming_list[i] * 2 , hamming_list[j] * 3 , hamming_list[k] * 5 ) )
index += 1
return hamming_list
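# -- Hedged worked example (added; computed by hand): the first ten Hamming
# numbers are 1, 2, 3, 4, 5, 6, 8, 9, 10, 12 -- every entry factors as
# 2^i * 3^j * 5^k, which is why 7 and 11 are skipped.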
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : Any = input("Enter the last number (nth term) of the Hamming Number Series: ")
print("Formula of Hamming Number Series => 2^i * 3^j * 5^k")
SCREAMING_SNAKE_CASE__ : int = hamming(int(n))
print("-----------------------------------------------------")
print(F"""The list with nth numbers is: {hamming_numbers}""")
print("-----------------------------------------------------")
| 85 | 0 |
'''simple docstring'''
import random
import torch
from huggingface_hub import HfApi
from diffusers import UNetaDModel
lowercase : Tuple = HfApi()
lowercase : Union[str, Any] = {}
# fmt: off
lowercase : str = torch.tensor([
-0.7_5_1_5, -1.6_8_8_3, 0.2_4_2_0, 0.0_3_0_0, 0.6_3_4_7, 1.3_4_3_3, -1.1_7_4_3, -3.7_4_6_7,
1.2_3_4_2, -2.2_4_8_5, 0.4_6_3_6, 0.8_0_7_6, -0.7_9_9_1, 0.3_9_6_9, 0.8_4_9_8, 0.9_1_8_9,
-1.8_8_8_7, -3.3_5_2_2, 0.7_6_3_9, 0.2_0_4_0, 0.6_2_7_1, -2.7_1_4_8, -1.6_3_1_6, 3.0_8_3_9,
0.3_1_8_6, 0.2_7_2_1, -0.9_7_5_9, -1.2_4_6_1, 2.6_2_5_7, 1.3_5_5_7
])
lowercase : List[str] = torch.tensor([
-2.3_6_3_9, -2.5_3_4_4, 0.0_0_5_4, -0.6_6_7_4, 1.5_9_9_0, 1.0_1_5_8, 0.3_1_2_4, -2.1_4_3_6,
1.8_7_9_5, -2.5_4_2_9, -0.1_5_6_6, -0.3_9_7_3, 1.2_4_9_0, 2.6_4_4_7, 1.2_2_8_3, -0.5_2_0_8,
-2.8_1_5_4, -3.5_1_1_9, 2.3_8_3_8, 1.2_0_3_3, 1.7_2_0_1, -2.1_2_5_6, -1.4_5_7_6, 2.7_9_4_8,
2.4_2_0_4, -0.9_7_5_2, -1.2_5_4_6, 0.8_0_2_7, 3.2_7_5_8, 3.1_3_6_5
])
lowercase : Optional[Any] = torch.tensor([
-0.6_5_3_1, -0.6_8_9_1, -0.3_1_7_2, -0.5_3_7_5, -0.9_1_4_0, -0.5_3_6_7, -0.1_1_7_5, -0.7_8_6_9,
-0.3_8_0_8, -0.4_5_1_3, -0.2_0_9_8, -0.0_0_8_3, 0.3_1_8_3, 0.5_1_4_0, 0.2_2_4_7, -0.1_3_0_4,
-0.1_3_0_2, -0.2_8_0_2, -0.2_0_8_4, -0.2_0_2_5, -0.4_9_6_7, -0.4_8_7_3, -0.0_8_6_1, 0.6_9_2_5,
0.0_2_5_0, 0.1_2_9_0, -0.1_5_4_3, 0.6_3_1_6, 1.0_4_6_0, 1.4_9_4_3
])
lowercase : str = torch.tensor([
0.0_9_1_1, 0.1_1_0_7, 0.0_1_8_2, 0.0_4_3_5, -0.0_8_0_5, -0.0_6_0_8, 0.0_3_8_1, 0.2_1_7_2,
-0.0_2_8_0, 0.1_3_2_7, -0.0_2_9_9, -0.0_2_5_5, -0.0_0_5_0, -0.1_1_7_0, -0.1_0_4_6, 0.0_3_0_9,
0.1_3_6_7, 0.1_7_2_8, -0.0_5_3_3, -0.0_7_4_8, -0.0_5_3_4, 0.1_6_2_4, 0.0_3_8_4, -0.1_8_0_5,
-0.0_7_0_7, 0.0_6_4_2, 0.0_2_2_0, -0.0_1_3_4, -0.1_3_3_3, -0.1_5_0_5
])
lowercase : Any = torch.tensor([
0.1_3_2_1, 0.1_3_3_7, 0.0_4_4_0, 0.0_6_2_2, -0.0_5_9_1, -0.0_3_7_0, 0.0_5_0_3, 0.2_1_3_3,
-0.0_1_7_7, 0.1_4_1_5, -0.0_1_1_6, -0.0_1_1_2, 0.0_0_4_4, -0.0_9_8_0, -0.0_7_8_9, 0.0_3_9_5,
0.1_5_0_2, 0.1_7_8_5, -0.0_4_8_8, -0.0_5_1_4, -0.0_4_0_4, 0.1_5_3_9, 0.0_4_5_4, -0.1_5_5_9,
-0.0_6_6_5, 0.0_6_5_9, 0.0_3_8_3, -0.0_0_0_5, -0.1_2_6_6, -0.1_3_8_6
])
lowercase : int = torch.tensor([
0.1_1_5_4, 0.1_2_1_8, 0.0_3_0_7, 0.0_5_2_6, -0.0_7_1_1, -0.0_5_4_1, 0.0_3_6_6, 0.2_0_7_8,
-0.0_2_6_7, 0.1_3_1_7, -0.0_2_2_6, -0.0_1_9_3, -0.0_0_1_4, -0.1_0_5_5, -0.0_9_0_2, 0.0_3_3_0,
0.1_3_9_1, 0.1_7_0_9, -0.0_5_6_2, -0.0_6_9_3, -0.0_5_6_0, 0.1_4_8_2, 0.0_3_8_1, -0.1_6_8_3,
-0.0_6_8_1, 0.0_6_6_1, 0.0_3_3_1, -0.0_0_4_6, -0.1_2_6_8, -0.1_4_3_1
])
lowercase : Optional[Any] = torch.tensor([
0.1_1_9_2, 0.1_2_4_0, 0.0_4_1_4, 0.0_6_0_6, -0.0_5_5_7, -0.0_4_1_2, 0.0_4_3_0, 0.2_0_4_2,
-0.0_2_0_0, 0.1_3_8_5, -0.0_1_1_5, -0.0_1_3_2, 0.0_0_1_7, -0.0_9_6_5, -0.0_8_0_2, 0.0_3_9_8,
0.1_4_3_3, 0.1_7_4_7, -0.0_4_5_8, -0.0_5_3_3, -0.0_4_0_7, 0.1_5_4_5, 0.0_4_1_9, -0.1_5_7_4,
-0.0_6_4_5, 0.0_6_2_6, 0.0_3_4_1, -0.0_0_1_0, -0.1_1_9_9, -0.1_3_9_0
])
lowercase : Dict = torch.tensor([
0.1_0_7_5, 0.1_0_7_4, 0.0_2_0_5, 0.0_4_3_1, -0.0_7_7_4, -0.0_6_0_7, 0.0_2_9_8, 0.2_0_4_2,
-0.0_3_2_0, 0.1_2_6_7, -0.0_2_8_1, -0.0_2_5_0, -0.0_0_6_4, -0.1_0_9_1, -0.0_9_4_6, 0.0_2_9_0,
0.1_3_2_8, 0.1_6_5_0, -0.0_5_8_0, -0.0_7_3_8, -0.0_5_8_6, 0.1_4_4_0, 0.0_3_3_7, -0.1_7_4_6,
-0.0_7_1_2, 0.0_6_0_5, 0.0_2_5_0, -0.0_0_9_9, -0.1_3_1_6, -0.1_4_7_3
])
lowercase : Optional[Any] = torch.tensor([
-1.4_5_7_2, -2.0_4_8_1, -0.0_4_1_4, -0.6_0_0_5, 1.4_1_3_6, 0.5_8_4_8, 0.4_0_2_8, -2.7_3_3_0,
1.2_2_1_2, -2.1_2_2_8, 0.2_1_5_5, 0.4_0_3_9, 0.7_6_6_2, 2.0_5_3_5, 0.7_4_7_7, -0.3_2_4_3,
-2.1_7_5_8, -2.7_6_4_8, 1.6_9_4_7, 0.7_0_2_6, 1.2_3_3_8, -1.6_0_7_8, -0.8_6_8_2, 2.2_8_1_0,
1.8_5_7_4, -0.5_7_1_8, -0.5_5_8_6, -0.0_1_8_6, 2.3_4_1_5, 2.1_2_5_1])
lowercase : str = torch.tensor([
-1.3_6_9_0, -1.9_7_2_0, -0.4_0_9_0, -0.6_9_6_6, 1.4_6_6_0, 0.9_9_3_8, -0.1_3_8_5, -2.7_3_2_4,
0.7_7_3_6, -1.8_9_1_7, 0.2_9_2_3, 0.4_2_9_3, 0.1_6_9_3, 1.4_1_1_2, 1.1_8_8_7, -0.3_1_8_1,
-2.2_1_6_0, -2.6_3_8_1, 1.3_1_7_0, 0.8_1_6_3, 0.9_2_4_0, -1.6_5_4_4, -0.6_0_9_9, 2.5_2_5_9,
1.6_4_3_0, -0.9_0_9_0, -0.9_3_9_2, -0.0_1_2_6, 2.4_2_6_8, 2.3_2_6_6
])
lowercase : Dict = torch.tensor([
-1.3_5_2_5, -1.9_6_2_8, -0.3_9_5_6, -0.6_8_6_0, 1.4_6_6_4, 1.0_0_1_4, -0.1_2_5_9, -2.7_2_1_2,
0.7_7_7_2, -1.8_8_1_1, 0.2_9_9_6, 0.4_3_8_8, 0.1_7_0_4, 1.4_0_2_9, 1.1_7_0_1, -0.3_0_2_7,
-2.2_0_5_3, -2.6_2_8_7, 1.3_3_5_0, 0.8_1_3_1, 0.9_2_7_4, -1.6_2_9_2, -0.6_0_9_8, 2.5_1_3_1,
1.6_5_0_5, -0.8_9_5_8, -0.9_2_9_8, -0.0_1_5_1, 2.4_2_5_7, 2.3_3_5_5
])
lowercase : int = torch.tensor([
-2.0_5_8_5, -2.7_8_9_7, -0.2_8_5_0, -0.8_9_4_0, 1.9_0_5_2, 0.5_7_0_2, 0.6_3_4_5, -3.8_9_5_9,
1.5_9_3_2, -3.2_3_1_9, 0.1_9_7_4, 0.0_2_8_7, 1.7_5_6_6, 2.6_5_4_3, 0.8_3_8_7, -0.5_3_5_1,
-3.2_7_3_6, -4.3_3_7_5, 2.9_0_2_9, 1.6_3_9_0, 1.4_6_4_0, -2.1_7_0_1, -1.9_0_1_3, 2.9_3_4_1,
3.4_9_8_1, -0.6_2_5_5, -1.1_6_4_4, -0.1_5_9_1, 3.7_0_9_7, 3.2_0_6_6
])
lowercase : Union[str, Any] = torch.tensor([
-2.3_1_3_9, -2.5_5_9_4, -0.0_1_9_7, -0.6_7_8_5, 1.7_0_0_1, 1.1_6_0_6, 0.3_0_7_5, -2.1_7_4_0,
1.8_0_7_1, -2.5_6_3_0, -0.0_9_2_6, -0.3_8_1_1, 1.2_1_1_6, 2.6_2_4_6, 1.2_7_3_1, -0.5_3_9_8,
-2.8_1_5_3, -3.6_1_4_0, 2.3_8_9_3, 1.3_2_6_2, 1.6_2_5_8, -2.1_8_5_6, -1.3_2_6_7, 2.8_3_9_5,
2.3_7_7_9, -1.0_6_2_3, -1.2_4_6_8, 0.8_9_5_9, 3.3_3_6_7, 3.2_2_4_3
])
lowercase : str = torch.tensor([
-2.0_6_2_8, -2.7_6_6_7, -0.2_0_8_9, -0.8_2_6_3, 2.0_5_3_9, 0.5_9_9_2, 0.6_4_9_5, -3.8_3_3_6,
1.6_0_2_5, -3.2_8_1_7, 0.1_7_2_1, -0.0_6_3_3, 1.7_5_1_6, 2.7_0_3_9, 0.8_1_0_0, -0.5_9_0_8,
-3.2_1_1_3, -4.4_3_4_3, 2.9_2_5_7, 1.3_6_3_2, 1.5_5_6_2, -2.1_4_8_9, -1.9_8_9_4, 3.0_5_6_0,
3.3_3_9_6, -0.7_3_2_8, -1.0_4_1_7, 0.0_3_8_3, 3.7_0_9_3, 3.2_3_4_3
])
lowercase : List[Any] = torch.tensor([
-1.4_5_7_4, -2.0_5_6_9, -0.0_4_7_3, -0.6_1_1_7, 1.4_0_1_8, 0.5_7_6_9, 0.4_1_2_9, -2.7_3_4_4,
1.2_2_4_1, -2.1_3_9_7, 0.2_0_0_0, 0.3_9_3_7, 0.7_6_1_6, 2.0_4_5_3, 0.7_3_2_4, -0.3_3_9_1,
-2.1_7_4_6, -2.7_7_4_4, 1.6_9_6_3, 0.6_9_2_1, 1.2_1_8_7, -1.6_1_7_2, -0.8_8_7_7, 2.2_4_3_9,
1.8_4_7_1, -0.5_8_3_9, -0.5_6_0_5, -0.0_4_6_4, 2.3_2_5_0, 2.1_2_1_9
])
# fmt: on
lowercase : Tuple = api.list_models(filter="""diffusers""")
for mod in models:
if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256":
lowercase : Tuple = "/home/patrick/google_checkpoints/" + mod.modelId.split("""/""")[-1]
print(F"""Started running {mod.modelId}!!!""")
if mod.modelId.startswith("""CompVis"""):
lowercase : Optional[int] = UNetaDModel.from_pretrained(local_checkpoint, subfolder="""unet""")
else:
lowercase : List[Any] = UNetaDModel.from_pretrained(local_checkpoint)
torch.manual_seed(0)
random.seed(0)
lowercase : Dict = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
lowercase : Optional[Any] = torch.tensor([10] * noise.shape[0])
with torch.no_grad():
lowercase : int = model(noise, time_step).sample
assert torch.allclose(
logits[0, 0, 0, :30], results["""_""".join("""_""".join(mod.modelId.split("""/""")).split("""-"""))], atol=1E-3
)
print(F"""{mod.modelId} has passed successfully!!!""")
| 116 | from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
SCREAMING_SNAKE_CASE__ : Union[str, Any] = {
"configuration_nllb_moe": [
"NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP",
"NllbMoeConfig",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ : str = [
"NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST",
"NllbMoeForConditionalGeneration",
"NllbMoeModel",
"NllbMoePreTrainedModel",
"NllbMoeTop2Router",
"NllbMoeSparseMLP",
]
if TYPE_CHECKING:
from .configuration_nllb_moe import (
NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
NllbMoeConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nllb_moe import (
NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
NllbMoeForConditionalGeneration,
NllbMoeModel,
NllbMoePreTrainedModel,
NllbMoeSparseMLP,
NllbMoeTopaRouter,
)
else:
import sys
SCREAMING_SNAKE_CASE__ : str = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 85 | 0 |
# This code is adapted from OpenAI's release
# https://github.com/openai/human-eval/blob/master/human_eval/execution.py
import contextlib
import faulthandler
import io
import multiprocessing
import os
import platform
import signal
import tempfile
def a_ ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
lowerCAmelCase__ = multiprocessing.Manager()
lowerCAmelCase__ = manager.list()
lowerCAmelCase__ = multiprocessing.Process(target=lowercase__ , args=(check_program, result, timeout) )
p.start()
p.join(timeout=timeout + 1 )
if p.is_alive():
p.kill()
if not result:
result.append('''timed out''' )
return {
"task_id": task_id,
"passed": result[0] == "passed",
"result": result[0],
"completion_id": completion_id,
}
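# -- Hedged sketch (added; a generic restatement of the pattern above, not the
# source's exact API): execute an untrusted snippet in a subprocess with a hard
# timeout, collecting the verdict through a managed list.
def _sandbox_worker(program, verdict):
    try:
        exec(program, {})  # mirrors the guarded exec used further below
        verdict.append("passed")
    except BaseException as exc:
        verdict.append(f"failed: {exc}")

def _run_with_timeout_sketch(program, timeout):
    manager = multiprocessing.Manager()
    verdict = manager.list()
    proc = multiprocessing.Process(target=_sandbox_worker, args=(program, verdict))
    proc.start()
    proc.join(timeout=timeout)
    if proc.is_alive():
        proc.kill()  # the child exceeded its time budget
    return verdict[0] if verdict else "timed out"

# _run_with_timeout_sketch("x = 1 + 1", 3.0)  -> "passed"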
def a_ ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
with create_tempdir():
# These system calls are needed when cleaning up tempdir.
import os
import shutil
lowerCAmelCase__ = shutil.rmtree
lowerCAmelCase__ = os.rmdir
lowerCAmelCase__ = os.chdir
# Disable functionalities that can make destructive changes to the test.
reliability_guard()
# Run program.
try:
lowerCAmelCase__ = {}
with swallow_io():
with time_limit(lowercase__ ):
exec(lowercase__ , lowercase__ )
result.append('''passed''' )
except TimeoutException:
result.append('''timed out''' )
except BaseException as e:
result.append(F"""failed: {e}""" )
# Needed for cleaning up.
lowerCAmelCase__ = rmtree
lowerCAmelCase__ = rmdir
lowerCAmelCase__ = chdir
@contextlib.contextmanager
def a_ ( __lowerCAmelCase ):
def signal_handler(__lowerCAmelCase , __lowerCAmelCase ):
raise TimeoutException('''Timed out!''' )
signal.setitimer(signal.ITIMER_REAL , lowercase__ )
signal.signal(signal.SIGALRM , lowercase__ )
try:
yield
finally:
signal.setitimer(signal.ITIMER_REAL , 0 )
@contextlib.contextmanager
def a_ ( ):
lowerCAmelCase__ = WriteOnlyStringIO()
with contextlib.redirect_stdout(lowercase__ ):
with contextlib.redirect_stderr(lowercase__ ):
with redirect_stdin(lowercase__ ):
yield
@contextlib.contextmanager
def a_ ( ):
with tempfile.TemporaryDirectory() as dirname:
with chdir(lowercase__ ):
yield dirname
class SCREAMING_SNAKE_CASE__ (UpperCamelCase_ ):
pass
class SCREAMING_SNAKE_CASE__ (io.StringIO ):
def A__ ( self : Union[str, Any] , *__lowerCamelCase : Union[str, Any] , **__lowerCamelCase : List[str] ):
"""simple docstring"""
raise OSError
def A__ ( self : Tuple , *__lowerCamelCase : Tuple , **__lowerCamelCase : Any ):
"""simple docstring"""
raise OSError
def A__ ( self : Union[str, Any] , *__lowerCamelCase : Union[str, Any] , **__lowerCamelCase : List[str] ):
"""simple docstring"""
raise OSError
def A__ ( self : Union[str, Any] , *__lowerCamelCase : Any , **__lowerCamelCase : Optional[int] ):
"""simple docstring"""
return False
class SCREAMING_SNAKE_CASE__ (contextlib._RedirectStream ): # type: ignore
lowercase_ : Any = "stdin"
@contextlib.contextmanager
def a_ ( __lowerCAmelCase ):
if root == ".":
yield
return
lowerCAmelCase__ = os.getcwd()
os.chdir(lowercase__ )
try:
yield
except BaseException as exc:
raise exc
finally:
os.chdir(lowercase__ )
def a_ ( __lowerCAmelCase=None ):
if maximum_memory_bytes is not None:
import resource
resource.setrlimit(resource.RLIMIT_AS , (maximum_memory_bytes, maximum_memory_bytes) )
resource.setrlimit(resource.RLIMIT_DATA , (maximum_memory_bytes, maximum_memory_bytes) )
if not platform.uname().system == "Darwin":
resource.setrlimit(resource.RLIMIT_STACK , (maximum_memory_bytes, maximum_memory_bytes) )
faulthandler.disable()
import builtins
lowerCAmelCase__ = None
lowerCAmelCase__ = None
import os
lowerCAmelCase__ = '1'
lowerCAmelCase__ = None
lowerCAmelCase__ = None
lowerCAmelCase__ = None
lowerCAmelCase__ = None
lowerCAmelCase__ = None
lowerCAmelCase__ = None
lowerCAmelCase__ = None
lowerCAmelCase__ = None
lowerCAmelCase__ = None
lowerCAmelCase__ = None
lowerCAmelCase__ = None
lowerCAmelCase__ = None
lowerCAmelCase__ = None
lowerCAmelCase__ = None
lowerCAmelCase__ = None
lowerCAmelCase__ = None
lowerCAmelCase__ = None
lowerCAmelCase__ = None
lowerCAmelCase__ = None
lowerCAmelCase__ = None
lowerCAmelCase__ = None
lowerCAmelCase__ = None
lowerCAmelCase__ = None
lowerCAmelCase__ = None
lowerCAmelCase__ = None
lowerCAmelCase__ = None
lowerCAmelCase__ = None
import shutil
lowerCAmelCase__ = None
lowerCAmelCase__ = None
lowerCAmelCase__ = None
import subprocess
lowerCAmelCase__ = None # type: ignore
lowerCAmelCase__ = None
import sys
lowerCAmelCase__ = None
lowerCAmelCase__ = None
lowerCAmelCase__ = None
lowerCAmelCase__ = None
lowerCAmelCase__ = None
| 615 | from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE__ : List[str] = {
"configuration_trocr": ["TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP", "TrOCRConfig"],
"processing_trocr": ["TrOCRProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ : Optional[int] = [
"TROCR_PRETRAINED_MODEL_ARCHIVE_LIST",
"TrOCRForCausalLM",
"TrOCRPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
SCREAMING_SNAKE_CASE__ : Dict = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
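# -- Hedged note (added): with the _LazyModule pattern above, the torch-backed
# classes are imported only on first attribute access, e.g. (assumed usage):
#   import transformers.models.trocr as trocr  # cheap, no torch import yet
#   trocr.TrOCRForCausalLM                     # first access triggers the import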
| 85 | 0 |
"""simple docstring"""
import torch
from diffusers import DiffusionPipeline
class lowercase_ ( UpperCamelCase_ ):
'''simple docstring'''
def __init__( self : Any , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Dict ):
super().__init__()
self.register_modules(unet=a_ , scheduler=a_ )
def __call__( self : Optional[int] ):
_A = torch.randn(
(1, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size) , )
_A = 1
_A = self.unet(a_ , a_ ).sample
_A = self.scheduler.step(a_ , a_ , a_ ).prev_sample
_A = scheduler_output - scheduler_output + torch.ones_like(a_ )
return result
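# -- Hedged usage sketch (added; the pipeline class above is a test fixture, so
# the name below is illustrative):
#   from diffusers import UNet2DModel, DDPMScheduler
#   pipe = OneStepPipeline(unet=UNet2DModel(), scheduler=DDPMScheduler())
#   out = pipe()  # by construction, a tensor of ones shaped like the sample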
| 7 | import numpy as np
from cva import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uinta
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
SCREAMING_SNAKE_CASE__ : int = imread(r"digital_image_processing/image_data/lena_small.jpg")
SCREAMING_SNAKE_CASE__ : List[Any] = cvtColor(img, COLOR_BGR2GRAY)
def _a ( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Any = cn.convert_to_negative(lowercase__ )
# the negative image array should contain at least one True value
assert negative_img.any()
def _a ( ):
'''simple docstring'''
with Image.open('digital_image_processing/image_data/lena_small.jpg' ) as img:
# the PIL image cannot be compared directly, so assert on its string representation
assert str(cc.change_contrast(lowercase__ , 1_10 ) ).startswith(
'<PIL.Image.Image image mode=RGB size=100x100 at' )
def _a ( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : str = canny.gen_gaussian_kernel(9 , sigma=1.4 )
# every entry of the Gaussian kernel should be nonzero
assert resp.all()
def _a ( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[int] = imread('digital_image_processing/image_data/lena_small.jpg' , 0 )
# every pixel of the grayscale test image should be nonzero
assert canny_img.all()
SCREAMING_SNAKE_CASE__ : List[str] = canny.canny(lowercase__ )
# the Canny edge map should contain at least one edge pixel
assert canny_array.any()
def _a ( ):
'''simple docstring'''
assert gg.gaussian_filter(lowercase__ , 5 , sigma=0.9 ).all()
def _a ( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Any = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]] )
SCREAMING_SNAKE_CASE__ : Tuple = conv.img_convolve(lowercase__ , lowercase__ ).astype(lowercase__ )
assert res.any()
def _a ( ):
'''simple docstring'''
assert med.median_filter(lowercase__ , 3 ).any()
def _a ( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : int = sob.sobel_filter(lowercase__ )
assert grad.any() and theta.any()
def _a ( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[str] = sp.make_sepia(lowercase__ , 20 )
assert sepia.all()
def _a ( lowercase__ : str = "digital_image_processing/image_data/lena_small.jpg" ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : str = bs.Burkes(imread(lowercase__ , 1 ) , 1_20 )
burkes.process()
assert burkes.output_img.any()
def _a ( lowercase__ : str = "digital_image_processing/image_data/lena_small.jpg" , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[Any] = rs.NearestNeighbour(imread(lowercase__ , 1 ) , 4_00 , 2_00 )
nn.process()
assert nn.output.any()
def _a ( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Dict = 'digital_image_processing/image_data/lena.jpg'
# Reading the image and converting it to grayscale.
SCREAMING_SNAKE_CASE__ : Dict = imread(lowercase__ , 0 )
# Test for get_neighbors_pixel function() return not None
SCREAMING_SNAKE_CASE__ : str = 0
SCREAMING_SNAKE_CASE__ : Dict = 0
SCREAMING_SNAKE_CASE__ : Any = image[x_coordinate][y_coordinate]
SCREAMING_SNAKE_CASE__ : List[Any] = lbp.get_neighbors_pixel(
lowercase__ , lowercase__ , lowercase__ , lowercase__ )
assert neighbors_pixels is not None
# Test for local_binary_pattern function()
# Create a numpy array as the same height and width of read image
SCREAMING_SNAKE_CASE__ : Optional[Any] = np.zeros((image.shape[0], image.shape[1]) )
# Iterating through the image and calculating the local binary pattern value
# for each pixel.
for i in range(0 , image.shape[0] ):
for j in range(0 , image.shape[1] ):
SCREAMING_SNAKE_CASE__ : str = lbp.local_binary_value(lowercase__ , lowercase__ , lowercase__ )
assert lbp_image.any()
| 85 | 0 |
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
_lowerCamelCase : List[Any] = get_logger()
_lowerCamelCase : Optional[dict] = None
class lowerCamelCase (TensorFormatter[Mapping, "jax.Array", Mapping] ):
"""simple docstring"""
def __init__( self : Any, _UpperCAmelCase : Dict=None, _UpperCAmelCase : Dict=None, **_UpperCAmelCase : Optional[int] ) -> Tuple:
"""simple docstring"""
super().__init__(features=a_ )
import jax
from jaxlib.xla_client import Device
if isinstance(a_, a_ ):
raise ValueError(
F'''Expected {device} to be a `str` not {type(a_ )}, as `jaxlib.xla_extension.Device` '''
"is not serializable neither with `pickle` nor with `dill`. Instead you can surround "
"the device with `str()` to get its string identifier that will be internally mapped "
"to the actual `jaxlib.xla_extension.Device`." )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = device if isinstance(a_, a_ ) else str(jax.devices()[0] )
# `jaxlib.xla_extension.Device` is not serializable with either `pickle` or
# `dill`, so we keep the device mapping in a global variable instead
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
SCREAMING_SNAKE_CASE__ : List[Any] = self._map_devices_to_str()
if self.device not in list(DEVICE_MAPPING.keys() ):
logger.warning(
F'''Device with string identifier {self.device} not listed among the available '''
F'''devices: {list(DEVICE_MAPPING.keys() )}, so falling back to the default '''
F'''device: {str(jax.devices()[0] )}.''' )
SCREAMING_SNAKE_CASE__ : Tuple = str(jax.devices()[0] )
SCREAMING_SNAKE_CASE__ : List[Any] = jnp_array_kwargs
@staticmethod
def A_ ( ) -> Dict[str, "jaxlib.xla_extension.Device"]:
"""simple docstring"""
import jax
return {str(a_ ): device for device in jax.devices()}
def A_ ( self : str, _UpperCAmelCase : Dict ) -> List[str]:
"""simple docstring"""
import jax
import jax.numpy as jnp
if isinstance(a_, a_ ) and column:
if all(
isinstance(a_, jax.Array ) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column ):
return jnp.stack(a_, axis=0 )
return column
def A_ ( self : int, _UpperCAmelCase : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
import jax
import jax.numpy as jnp
if isinstance(a_, (str, bytes, type(a_ )) ):
return value
elif isinstance(a_, (np.character, np.ndarray) ) and np.issubdtype(value.dtype, np.character ):
return value.tolist()
SCREAMING_SNAKE_CASE__ : Optional[int] = {}
if isinstance(a_, (np.number, np.ndarray) ) and np.issubdtype(value.dtype, np.integer ):
# the default int precision depends on the jax config
# see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
if jax.config.jax_enable_xaa:
SCREAMING_SNAKE_CASE__ : str = {'dtype': jnp.intaa}
else:
SCREAMING_SNAKE_CASE__ : List[Any] = {'dtype': jnp.intaa}
elif isinstance(a_, (np.number, np.ndarray) ) and np.issubdtype(value.dtype, np.floating ):
SCREAMING_SNAKE_CASE__ : Optional[int] = {'dtype': jnp.floataa}
elif config.PIL_AVAILABLE and "PIL" in sys.modules:
import PIL.Image
if isinstance(a_, PIL.Image.Image ):
SCREAMING_SNAKE_CASE__ : List[str] = np.asarray(a_ )
# `jaxlib.xla_extension.Device` is not serializable with either `pickle` or
# `dill`, so we keep the device mapping in a global variable instead
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
SCREAMING_SNAKE_CASE__ : Optional[int] = self._map_devices_to_str()
with jax.default_device(DEVICE_MAPPING[self.device] ):
# calling jnp.array on a np.ndarray does copy the data
# see https://github.com/google/jax/issues/4486
return jnp.array(a_, **{**default_dtype, **self.jnp_array_kwargs} )
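    # -- Hedged note (added): the integer-dtype switch above follows jax's
    # default precision rule -- integers are 32-bit unless the x64 flag is
    # enabled, e.g. (assumed): jnp.array([1]).dtype == jnp.int32 by default.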
def A_ ( self : Optional[Any], _UpperCAmelCase : Any ) -> Union[str, Any]:
"""simple docstring"""
import jax
# support for torch, tf, jax etc.
if config.TORCH_AVAILABLE and "torch" in sys.modules:
import torch
if isinstance(a_, torch.Tensor ):
return self._tensorize(data_struct.detach().cpu().numpy()[()] )
if hasattr(a_, "__array__" ) and not isinstance(a_, jax.Array ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = data_struct.__array__()
# support for nested types like struct of list of struct
if isinstance(a_, np.ndarray ):
if data_struct.dtype == object: # jax arrays cannot be instantied from an array of objects
return self._consolidate([self.recursive_tensorize(a_ ) for substruct in data_struct] )
elif isinstance(a_, (list, tuple) ):
return self._consolidate([self.recursive_tensorize(a_ ) for substruct in data_struct] )
return self._tensorize(a_ )
def A_ ( self : Union[str, Any], _UpperCAmelCase : dict ) -> List[str]:
"""simple docstring"""
return map_nested(self._recursive_tensorize, a_, map_list=a_ )
def A_ ( self : Dict, _UpperCAmelCase : pa.Table ) -> Mapping:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = self.numpy_arrow_extractor().extract_row(a_ )
SCREAMING_SNAKE_CASE__ : List[str] = self.python_features_decoder.decode_row(a_ )
return self.recursive_tensorize(a_ )
def A_ ( self : Any, _UpperCAmelCase : pa.Table ) -> "jax.Array":
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = self.numpy_arrow_extractor().extract_column(a_ )
SCREAMING_SNAKE_CASE__ : List[str] = self.python_features_decoder.decode_column(a_, pa_table.column_names[0] )
SCREAMING_SNAKE_CASE__ : Dict = self.recursive_tensorize(a_ )
SCREAMING_SNAKE_CASE__ : Tuple = self._consolidate(a_ )
return column
def A_ ( self : Optional[int], _UpperCAmelCase : pa.Table ) -> Mapping:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = self.numpy_arrow_extractor().extract_batch(a_ )
SCREAMING_SNAKE_CASE__ : str = self.python_features_decoder.decode_batch(a_ )
SCREAMING_SNAKE_CASE__ : List[Any] = self.recursive_tensorize(a_ )
for column_name in batch:
SCREAMING_SNAKE_CASE__ : Tuple = self._consolidate(batch[column_name] )
return batch
| 663 | import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
SCREAMING_SNAKE_CASE__ : Any = get_tests_dir() + "/test_data/fsmt/fsmt_val_data.json"
with io.open(filename, "r", encoding="utf-8") as f:
SCREAMING_SNAKE_CASE__ : Tuple = json.load(f)
@require_torch
class snake_case ( unittest.TestCase ):
def __lowercase( self : List[str] , a_ : Any )-> str:
"""simple docstring"""
return FSMTTokenizer.from_pretrained(a_ )
def __lowercase( self : int , a_ : Union[str, Any] )-> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = FSMTForConditionalGeneration.from_pretrained(a_ ).to(a_ )
if torch_device == "cuda":
model.half()
return model
@parameterized.expand(
[
['en-ru', 26.0],
['ru-en', 22.0],
['en-de', 22.0],
['de-en', 29.0],
] )
@slow
def __lowercase( self : int , a_ : Optional[int] , a_ : str )-> List[str]:
"""simple docstring"""
# note: this test does not measure peak performance since it only evaluates a small batch,
# but it should be enough to detect a regression in output quality
SCREAMING_SNAKE_CASE__ : Any = F'''facebook/wmt19-{pair}'''
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.get_tokenizer(a_ )
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.get_model(a_ )
SCREAMING_SNAKE_CASE__ : int = bleu_data[pair]['src']
SCREAMING_SNAKE_CASE__ : Optional[int] = bleu_data[pair]['tgt']
SCREAMING_SNAKE_CASE__ : Any = tokenizer(a_ , return_tensors='pt' , truncation=a_ , padding='longest' ).to(a_ )
SCREAMING_SNAKE_CASE__ : int = model.generate(
input_ids=batch.input_ids , num_beams=8 , )
SCREAMING_SNAKE_CASE__ : Optional[int] = tokenizer.batch_decode(
a_ , skip_special_tokens=a_ , clean_up_tokenization_spaces=a_ )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = calculate_bleu(a_ , a_ )
print(a_ )
self.assertGreaterEqual(scores['bleu'] , a_ )
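# -- Hedged sketch (added; an assumption about the helper imported above):
# `calculate_bleu` typically wraps sacrebleu roughly like this:
#   from sacrebleu import corpus_bleu
#   def calculate_bleu(output_lns, refs_lns):
#       return {"bleu": round(corpus_bleu(output_lns, [refs_lns]).score, 4)}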
| 85 | 0 |
"""simple docstring"""
import argparse
import torch
from transformers import OpenAIGPTConfig, OpenAIGPTModel, load_tf_weights_in_openai_gpt
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def lowerCamelCase__ ( _lowerCamelCase : Optional[Any] , _lowerCamelCase : Any , _lowerCamelCase : List[Any] ) -> Any:
if openai_config_file == "":
lowerCamelCase_ = OpenAIGPTConfig()
else:
lowerCamelCase_ = OpenAIGPTConfig.from_json_file(lowercase__ )
lowerCamelCase_ = OpenAIGPTModel(lowercase__ )
# Load weights from numpy
load_tf_weights_in_openai_gpt(lowercase__ , lowercase__ , lowercase__ )
# Save pytorch-model
lowerCamelCase_ = pytorch_dump_folder_path + '/' + WEIGHTS_NAME
lowerCamelCase_ = pytorch_dump_folder_path + '/' + CONFIG_NAME
print(F'''Save PyTorch model to {pytorch_weights_dump_path}''' )
torch.save(model.state_dict() , lowercase__ )
print(F'''Save configuration file to {pytorch_config_dump_path}''' )
with open(lowercase__ , 'w' , encoding='utf-8' ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--openai_checkpoint_folder_path''',
default=None,
type=str,
required=True,
help='''Path to the TensorFlow checkpoint path.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--openai_config_file''',
default='''''',
type=str,
help=(
'''An optional config json file corresponding to the pre-trained OpenAI model. \n'''
'''This specifies the model architecture.'''
),
)
_SCREAMING_SNAKE_CASE : List[str] = parser.parse_args()
convert_openai_checkpoint_to_pytorch(
args.openai_checkpoint_folder_path, args.openai_config_file, args.pytorch_dump_folder_path
)
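# -- Hedged usage (added; flag names are taken from the argparse setup above,
# the script filename is hypothetical):
#   python convert_openai_gpt_checkpoint.py \
#       --openai_checkpoint_folder_path /path/to/tf_ckpt \
#       --pytorch_dump_folder_path /path/to/output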
| 549 | import os
import pytest
from attr import dataclass
SCREAMING_SNAKE_CASE__ : int = "us-east-1" # defaults region
@dataclass
class snake_case :
lowercase_ = 42
lowercase_ = 'arn:aws:iam::558105141721:role/sagemaker_execution_role'
lowercase_ = {
'task_name': 'mnli',
'per_device_train_batch_size': 16,
'per_device_eval_batch_size': 16,
'do_train': True,
'do_eval': True,
'do_predict': True,
'output_dir': '/opt/ml/model',
'overwrite_output_dir': True,
'max_steps': 500,
'save_steps': 5_500,
}
lowercase_ = {**hyperparameters, 'max_steps': 1_000}
@property
def __lowercase( self : List[str] )-> str:
"""simple docstring"""
if self.framework == "pytorch":
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"},
{"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"},
]
else:
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"},
{"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"},
]
@property
def __lowercase( self : Union[str, Any] )-> str:
"""simple docstring"""
return F'''{self.framework}-transformers-test'''
@property
def __lowercase( self : int )-> str:
"""simple docstring"""
return F'''./tests/sagemaker/scripts/{self.framework}'''
@property
def __lowercase( self : Tuple )-> str:
"""simple docstring"""
if self.framework == "pytorch":
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04"
else:
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04"
@pytest.fixture(scope='class' )
def _a ( lowercase__ : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[Any] = SageMakerTestEnvironment(framework=request.cls.framework )
| 85 | 0 |
"""simple docstring"""
def __lowerCamelCase ( a_ : str , a_ : bool = False ) -> List[str]:
if not isinstance(lowercase__ , lowercase__ ):
__SCREAMING_SNAKE_CASE :Any = f'''Expected string as input, found {type(lowercase__ )}'''
raise ValueError(lowercase__ )
if not isinstance(lowercase__ , lowercase__ ):
__SCREAMING_SNAKE_CASE :Union[str, Any] = f'''Expected boolean as use_pascal parameter, found {type(lowercase__ )}'''
raise ValueError(lowercase__ )
__SCREAMING_SNAKE_CASE :Any = input_str.split('''_''' )
__SCREAMING_SNAKE_CASE :Union[str, Any] = 0 if use_pascal else 1
__SCREAMING_SNAKE_CASE :int = words[start_index:]
__SCREAMING_SNAKE_CASE :Dict = [word[0].upper() + word[1:] for word in words_to_capitalize]
__SCREAMING_SNAKE_CASE :List[str] = '' if use_pascal else words[0]
return "".join([initial_word, *capitalized_words] )
if __name__ == "__main__":
from doctest import testmod
testmod() | 498 | import os
import unittest
from transformers import FunnelTokenizer, FunnelTokenizerFast
from transformers.models.funnel.tokenization_funnel import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class snake_case ( UpperCamelCase_ , unittest.TestCase ):
lowercase_ = FunnelTokenizer
lowercase_ = FunnelTokenizerFast
lowercase_ = True
lowercase_ = True
def __lowercase( self : Union[str, Any] )-> Tuple:
"""simple docstring"""
super().setUp()
SCREAMING_SNAKE_CASE__ : str = [
'<unk>',
'<cls>',
'<sep>',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
SCREAMING_SNAKE_CASE__ : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
def __lowercase( self : Any , **a_ : Any )-> List[str]:
"""simple docstring"""
return FunnelTokenizer.from_pretrained(self.tmpdirname , **a_ )
def __lowercase( self : Tuple , **a_ : List[Any] )-> List[Any]:
"""simple docstring"""
return FunnelTokenizerFast.from_pretrained(self.tmpdirname , **a_ )
def __lowercase( self : Optional[Any] , a_ : List[str] )-> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = 'UNwant\u00E9d,running'
SCREAMING_SNAKE_CASE__ : int = 'unwanted, running'
return input_text, output_text
def __lowercase( self : Optional[Any] )-> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = self.tokenizer_class(self.vocab_file )
SCREAMING_SNAKE_CASE__ : Any = tokenizer.tokenize('UNwant\u00E9d,running' )
self.assertListEqual(a_ , ['un', '##want', '##ed', ',', 'runn', '##ing'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(a_ ) , [7, 4, 5, 10, 8, 9] )
def __lowercase( self : List[Any] )-> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.get_tokenizers(do_lower_case=a_ )
for tokenizer in tokenizers:
SCREAMING_SNAKE_CASE__ : Optional[Any] = tokenizer('UNwant\u00E9d,running' )
SCREAMING_SNAKE_CASE__ : List[Any] = len(inputs['input_ids'] ) - 1
self.assertListEqual(inputs['token_type_ids'] , [2] + [0] * sentence_len )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = tokenizer('UNwant\u00E9d,running' , 'UNwant\u00E9d,running' )
self.assertListEqual(inputs['token_type_ids'] , [2] + [0] * sentence_len + [1] * sentence_len )
| 85 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
A__ : Tuple = {
"configuration_git": ["GIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "GitConfig", "GitVisionConfig"],
"processing_git": ["GitProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : str = [
"GIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"GitForCausalLM",
"GitModel",
"GitPreTrainedModel",
"GitVisionModel",
]
if TYPE_CHECKING:
from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
from .processing_git import GitProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_git import (
GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GitForCausalLM,
GitModel,
GitPreTrainedModel,
GitVisionModel,
)
else:
import sys
A__ : Dict = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 286 | from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
SCREAMING_SNAKE_CASE__ : Dict = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ : Any = {
"facebook/levit-128S": "https://huggingface.co/facebook/levit-128S/resolve/main/config.json",
# See all LeViT models at https://huggingface.co/models?filter=levit
}
class snake_case ( UpperCamelCase_ ):
lowercase_ = 'levit'
def __init__( self : str , a_ : Optional[Any]=224 , a_ : List[str]=3 , a_ : Any=3 , a_ : Any=2 , a_ : Tuple=1 , a_ : int=16 , a_ : Optional[int]=[128, 256, 384] , a_ : Dict=[4, 8, 12] , a_ : List[str]=[4, 4, 4] , a_ : Any=[16, 16, 16] , a_ : Dict=0 , a_ : Tuple=[2, 2, 2] , a_ : Union[str, Any]=[2, 2, 2] , a_ : Optional[Any]=0.02 , **a_ : str , )-> Any:
"""simple docstring"""
super().__init__(**a_ )
SCREAMING_SNAKE_CASE__ : Any = image_size
SCREAMING_SNAKE_CASE__ : List[Any] = num_channels
SCREAMING_SNAKE_CASE__ : Any = kernel_size
SCREAMING_SNAKE_CASE__ : Union[str, Any] = stride
SCREAMING_SNAKE_CASE__ : Any = padding
SCREAMING_SNAKE_CASE__ : Any = hidden_sizes
SCREAMING_SNAKE_CASE__ : List[Any] = num_attention_heads
SCREAMING_SNAKE_CASE__ : Optional[Any] = depths
SCREAMING_SNAKE_CASE__ : List[str] = key_dim
SCREAMING_SNAKE_CASE__ : int = drop_path_rate
SCREAMING_SNAKE_CASE__ : List[str] = patch_size
SCREAMING_SNAKE_CASE__ : List[str] = attention_ratio
SCREAMING_SNAKE_CASE__ : Tuple = mlp_ratio
SCREAMING_SNAKE_CASE__ : str = initializer_range
SCREAMING_SNAKE_CASE__ : List[Any] = [
['Subsample', key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
['Subsample', key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
]
class snake_case ( UpperCamelCase_ ):
lowercase_ = version.parse('1.11' )
@property
def __lowercase( self : str )-> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def __lowercase( self : Any )-> float:
"""simple docstring"""
return 1e-4
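# -- Hedged usage sketch (added; follows the standard transformers ONNX export
# flow rather than the obfuscated class names above):
#   from transformers import LevitConfig
#   from transformers.models.levit.configuration_levit import LevitOnnxConfig
#   onnx_config = LevitOnnxConfig(LevitConfig())
#   list(onnx_config.inputs)         # ['pixel_values']
#   onnx_config.atol_for_validation  # 1e-4, matching the property above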
| 85 | 0 |
'''simple docstring'''
import os
# Precomputes a list of the 100 first triangular numbers
__A : Optional[int] = [int(0.5 * n * (n + 1)) for n in range(1, 101)]
def UpperCamelCase_ ( ):
'''simple docstring'''
lowerCAmelCase_ : Tuple = os.path.dirname(os.path.realpath(lowercase__ ) )
lowerCAmelCase_ : Optional[Any] = os.path.join(lowercase__ , """words.txt""" )
lowerCAmelCase_ : int = ''
with open(lowercase__ ) as f:
lowerCAmelCase_ : int = f.readline()
lowerCAmelCase_ : List[Any] = [word.strip("""\"""" ) for word in words.strip("""\r\n""" ).split(""",""" )]
lowerCAmelCase_ : Dict = [
word
for word in [sum(ord(lowercase__ ) - 64 for x in word ) for word in words]
if word in TRIANGULAR_NUMBERS
]
return len(lowercase__ )
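# -- Hedged worked example (added; computed by hand): "SKY" scores
# 19 + 11 + 25 = 55 = 10 * 11 / 2, the tenth triangular number, so it counts.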
if __name__ == "__main__":
print(solution())
| 275 | import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionInstructPixaPixPipeline,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.utils import floats_tensor, load_image, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class snake_case ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ):
lowercase_ = StableDiffusionInstructPixaPixPipeline
lowercase_ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'height', 'width', 'cross_attention_kwargs'}
lowercase_ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
lowercase_ = IMAGE_TO_IMAGE_IMAGE_PARAMS
lowercase_ = IMAGE_TO_IMAGE_IMAGE_PARAMS
def __lowercase( self : str )-> int:
"""simple docstring"""
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ : List[Any] = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=8 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
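        # note (added; based on the InstructPix2Pix design): in_channels=8
        # because the pipeline concatenates 4 noise-latent channels with 4
        # image-latent channels before each UNet call.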
SCREAMING_SNAKE_CASE__ : List[str] = PNDMScheduler(skip_prk_steps=a_ )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ : Optional[int] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ : Optional[int] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
SCREAMING_SNAKE_CASE__ : int = CLIPTextModel(a_ )
SCREAMING_SNAKE_CASE__ : Dict = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
SCREAMING_SNAKE_CASE__ : List[str] = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def __lowercase( self : List[Any] , a_ : Tuple , a_ : Optional[Any]=0 )-> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = floats_tensor((1, 3, 32, 32) , rng=random.Random(a_ ) ).to(a_ )
SCREAMING_SNAKE_CASE__ : str = image.cpu().permute(0 , 2 , 3 , 1 )[0]
SCREAMING_SNAKE_CASE__ : List[Any] = Image.fromarray(np.uinta(a_ ) ).convert('RGB' )
if str(a_ ).startswith('mps' ):
SCREAMING_SNAKE_CASE__ : str = torch.manual_seed(a_ )
else:
SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.Generator(device=a_ ).manual_seed(a_ )
SCREAMING_SNAKE_CASE__ : Dict = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'image_guidance_scale': 1,
'output_type': 'numpy',
}
return inputs
def __lowercase( self : str )-> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = 'cpu' # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE__ : Optional[int] = self.get_dummy_components()
SCREAMING_SNAKE_CASE__ : List[str] = StableDiffusionInstructPixaPixPipeline(**a_ )
SCREAMING_SNAKE_CASE__ : List[str] = sd_pipe.to(a_ )
sd_pipe.set_progress_bar_config(disable=a_ )
SCREAMING_SNAKE_CASE__ : Tuple = self.get_dummy_inputs(a_ )
SCREAMING_SNAKE_CASE__ : int = sd_pipe(**a_ ).images
SCREAMING_SNAKE_CASE__ : Dict = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
SCREAMING_SNAKE_CASE__ : Dict = np.array([0.7526, 0.3750, 0.4547, 0.6117, 0.5866, 0.5016, 0.4327, 0.5642, 0.4815] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def __lowercase( self : Optional[Any] )-> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = 'cpu' # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE__ : Dict = self.get_dummy_components()
SCREAMING_SNAKE_CASE__ : Optional[Any] = StableDiffusionInstructPixaPixPipeline(**a_ )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = sd_pipe.to(a_ )
sd_pipe.set_progress_bar_config(disable=a_ )
SCREAMING_SNAKE_CASE__ : List[str] = self.get_dummy_inputs(a_ )
SCREAMING_SNAKE_CASE__ : Optional[Any] = 'french fries'
SCREAMING_SNAKE_CASE__ : Optional[Any] = sd_pipe(**a_ , negative_prompt=a_ )
SCREAMING_SNAKE_CASE__ : Dict = output.images
SCREAMING_SNAKE_CASE__ : Any = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
SCREAMING_SNAKE_CASE__ : List[str] = np.array([0.7511, 0.3642, 0.4553, 0.6236, 0.5797, 0.5013, 0.4343, 0.5611, 0.4831] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def __lowercase( self : List[Any] )-> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = 'cpu' # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE__ : Optional[int] = self.get_dummy_components()
SCREAMING_SNAKE_CASE__ : Optional[Any] = StableDiffusionInstructPixaPixPipeline(**a_ )
SCREAMING_SNAKE_CASE__ : int = sd_pipe.to(a_ )
sd_pipe.set_progress_bar_config(disable=a_ )
SCREAMING_SNAKE_CASE__ : Optional[int] = self.get_dummy_inputs(a_ )
SCREAMING_SNAKE_CASE__ : Optional[Any] = [inputs['prompt']] * 2
SCREAMING_SNAKE_CASE__ : List[str] = np.array(inputs['image'] ).astype(np.floataa ) / 255.0
SCREAMING_SNAKE_CASE__ : Tuple = torch.from_numpy(a_ ).unsqueeze(0 ).to(a_ )
SCREAMING_SNAKE_CASE__ : Dict = image / 2 + 0.5
SCREAMING_SNAKE_CASE__ : Tuple = image.permute(0 , 3 , 1 , 2 )
SCREAMING_SNAKE_CASE__ : int = image.repeat(2 , 1 , 1 , 1 )
SCREAMING_SNAKE_CASE__ : Optional[int] = sd_pipe(**a_ ).images
SCREAMING_SNAKE_CASE__ : Any = image[-1, -3:, -3:, -1]
assert image.shape == (2, 32, 32, 3)
SCREAMING_SNAKE_CASE__ : int = np.array([0.5812, 0.5748, 0.5222, 0.5908, 0.5695, 0.7174, 0.6804, 0.5523, 0.5579] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def __lowercase( self : List[Any] )-> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Any = 'cpu' # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE__ : str = self.get_dummy_components()
SCREAMING_SNAKE_CASE__ : Optional[Any] = EulerAncestralDiscreteScheduler(
beta_start=0.0_0085 , beta_end=0.012 , beta_schedule='scaled_linear' )
SCREAMING_SNAKE_CASE__ : List[Any] = StableDiffusionInstructPixaPixPipeline(**a_ )
SCREAMING_SNAKE_CASE__ : Dict = sd_pipe.to(a_ )
sd_pipe.set_progress_bar_config(disable=a_ )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.get_dummy_inputs(a_ )
SCREAMING_SNAKE_CASE__ : Tuple = sd_pipe(**a_ ).images
SCREAMING_SNAKE_CASE__ : Any = image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE__ : Any = [round(a_ , 4 ) for x in image_slice.flatten().tolist()]
print(','.join([str(a_ ) for x in slice] ) )
assert image.shape == (1, 32, 32, 3)
SCREAMING_SNAKE_CASE__ : List[Any] = np.array([0.7417, 0.3842, 0.4732, 0.5776, 0.5891, 0.5139, 0.4052, 0.5673, 0.4986] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def __lowercase( self : Union[str, Any] )-> Any:
"""simple docstring"""
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
def __lowercase( self : List[Any] )-> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = self.get_dummy_components()
SCREAMING_SNAKE_CASE__ : List[str] = StableDiffusionInstructPixaPixPipeline(**a_ )
SCREAMING_SNAKE_CASE__ : int = VaeImageProcessor(do_resize=a_ , do_normalize=a_ )
SCREAMING_SNAKE_CASE__ : Tuple = pipe.to(a_ )
pipe.set_progress_bar_config(disable=a_ )
SCREAMING_SNAKE_CASE__ : Any = pipe(**self.get_dummy_inputs_by_type(a_ , input_image_type='pt' ) )[0]
SCREAMING_SNAKE_CASE__ : Optional[int] = components['vae']
SCREAMING_SNAKE_CASE__ : Optional[int] = self.get_dummy_inputs_by_type(a_ , input_image_type='pt' )
for image_param in self.image_latents_params:
if image_param in inputs.keys():
SCREAMING_SNAKE_CASE__ : Union[str, Any] = vae.encode(inputs[image_param] ).latent_dist.mode()
SCREAMING_SNAKE_CASE__ : Optional[Any] = pipe(**a_ )[0]
SCREAMING_SNAKE_CASE__ : List[Any] = np.abs(out - out_latents_inputs ).max()
self.assertLess(a_ , 1e-4 , 'passing latents as image input generate different result from passing image' )
@slow
@require_torch_gpu
class snake_case ( unittest.TestCase ):
def __lowercase( self : Tuple )-> Dict:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowercase( self : List[Any] , a_ : Dict=0 )-> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = torch.manual_seed(a_ )
SCREAMING_SNAKE_CASE__ : List[str] = load_image(
'https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg' )
SCREAMING_SNAKE_CASE__ : Tuple = {
'prompt': 'turn him into a cyborg',
'image': image,
'generator': generator,
'num_inference_steps': 3,
'guidance_scale': 7.5,
'image_guidance_scale': 1.0,
'output_type': 'numpy',
}
return inputs
def __lowercase( self : int )-> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'timbrooks/instruct-pix2pix' , safety_checker=a_ )
pipe.to(a_ )
pipe.set_progress_bar_config(disable=a_ )
pipe.enable_attention_slicing()
SCREAMING_SNAKE_CASE__ : str = self.get_inputs()
SCREAMING_SNAKE_CASE__ : Optional[Any] = pipe(**a_ ).images
SCREAMING_SNAKE_CASE__ : List[str] = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE__ : Union[str, Any] = np.array([0.5902, 0.6015, 0.6027, 0.5983, 0.6092, 0.6061, 0.5765, 0.5785, 0.5555] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def __lowercase( self : Dict )-> str:
"""simple docstring"""
pipe = StableDiffusionInstructPixaPixPipeline.from_pretrained(
    'timbrooks/instruct-pix2pix' , safety_checker=a_ )
pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.to(a_ )
pipe.set_progress_bar_config(disable=a_ )
pipe.enable_attention_slicing()
inputs = self.get_inputs()
image = pipe(**inputs ).images
image_slice = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
expected_slice = np.array([0.6578, 0.6817, 0.6972, 0.6761, 0.6856, 0.6916, 0.6428, 0.6516, 0.6301] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def __lowercase( self : Optional[int] )-> Union[str, Any]:
"""simple docstring"""
pipe = StableDiffusionInstructPixaPixPipeline.from_pretrained(
    'timbrooks/instruct-pix2pix' , safety_checker=a_ )
pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config )
pipe.to(a_ )
pipe.set_progress_bar_config(disable=a_ )
pipe.enable_attention_slicing()
inputs = self.get_inputs()
image = pipe(**inputs ).images
image_slice = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
expected_slice = np.array([0.3828, 0.3834, 0.3818, 0.3792, 0.3865, 0.3752, 0.3792, 0.3847, 0.3753] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def __lowercase( self : int )-> List[str]:
"""simple docstring"""
number_of_steps = 0

def callback_fn(step : int , timestep : int , latents : torch.FloatTensor ) -> None:
    callback_fn.has_been_called = True
    nonlocal number_of_steps
    number_of_steps += 1
    if step == 1:
        latents = latents.detach().cpu().numpy()
        assert latents.shape == (1, 4, 64, 64)
        latents_slice = latents[0, -3:, -3:, -1]
        expected_slice = np.array([-0.2463, -0.4644, -0.9756, 1.5176, 1.4414, 0.7866, 0.9897, 0.8521, 0.7983] )
        assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
    elif step == 2:
        latents = latents.detach().cpu().numpy()
        assert latents.shape == (1, 4, 64, 64)
        latents_slice = latents[0, -3:, -3:, -1]
        expected_slice = np.array([-0.2644, -0.4626, -0.9653, 1.5176, 1.4551, 0.7686, 0.9805, 0.8452, 0.8115] )
        assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2

callback_fn.has_been_called = False
pipe = StableDiffusionInstructPixaPixPipeline.from_pretrained(
    'timbrooks/instruct-pix2pix' , safety_checker=a_ , torch_dtype=torch.floataa )
pipe = pipe.to(a_ )
pipe.set_progress_bar_config(disable=a_ )
pipe.enable_attention_slicing()
inputs = self.get_inputs()
pipe(**inputs , callback=callback_fn , callback_steps=1 )
assert callback_fn.has_been_called
assert number_of_steps == 3
def __lowercase( self : int )-> Any:
"""simple docstring"""
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
pipe = StableDiffusionInstructPixaPixPipeline.from_pretrained(
    'timbrooks/instruct-pix2pix' , safety_checker=a_ , torch_dtype=torch.floataa )
pipe = pipe.to(a_ )
pipe.set_progress_bar_config(disable=a_ )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
inputs = self.get_inputs()
_ = pipe(**inputs )
mem_bytes = torch.cuda.max_memory_allocated()
# make sure that less than 2.2 GB is allocated
assert mem_bytes < 2.2 * 10**9
def __lowercase( self : Tuple )-> List[Any]:
"""simple docstring"""
inputs = self.get_inputs()
# resize to a resolution that is divisible by 8 but not by 16 or 32
inputs['image'] = inputs['image'].resize((504, 504) )
model_id = 'timbrooks/instruct-pix2pix'
pipe = StableDiffusionInstructPixaPixPipeline.from_pretrained(
    model_id , safety_checker=a_ , )
pipe.to(a_ )
pipe.set_progress_bar_config(disable=a_ )
pipe.enable_attention_slicing()
output = pipe(**inputs )
image = output.images[0]
image_slice = image[255:258, 383:386, -1]
assert image.shape == (504, 504, 3)
expected_slice = np.array([0.2726, 0.2529, 0.2664, 0.2655, 0.2641, 0.2642, 0.2591, 0.2649, 0.2590] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-3
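# --- Added inference sketch (not part of the original test file) ---
# Minimal use of the pipeline exercised by the tests above. The model id comes
# from the tests; the device, prompt and image URL are illustrative assumptions.
import torch
from diffusers import StableDiffusionInstructPix2PixPipeline
from diffusers.utils import load_image

pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
    'timbrooks/instruct-pix2pix' , safety_checker=None , torch_dtype=torch.float16 ).to('cuda' )
image = load_image(
    'https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg' )
result = pipe('turn him into a cyborg' , image=image , num_inference_steps=10 , image_guidance_scale=1.0 )
result.images[0].save('cyborg.png' )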
| 85 | 0 |
import os
from typing import List, Optional, Union
from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer
class __snake_case ( UpperCamelCase_ ):
_a : Dict= ["image_processor", "tokenizer"]
_a : Optional[Any]= "BlipImageProcessor"
_a : int= "AutoTokenizer"
def __init__( self ,image_processor ,tokenizer ,qformer_tokenizer ):
    '''simple docstring'''
    super().__init__(image_processor ,tokenizer )
    # add QFormer tokenizer
    self.qformer_tokenizer = qformer_tokenizer
def __call__( self ,images = None ,text = None ,add_special_tokens = True ,padding = False ,truncation = None ,max_length = None ,stride = 0 ,pad_to_multiple_of = None ,return_attention_mask = None ,return_overflowing_tokens = False ,return_special_tokens_mask = False ,return_offsets_mapping = False ,return_token_type_ids = False ,return_length = False ,verbose = True ,return_tensors = None ,**kwargs ,):
    '''simple docstring'''
    if images is None and text is None:
        raise ValueError("""You have to specify at least images or text.""" )
    encoding = BatchFeature()
    if text is not None:
        text_encoding = self.tokenizer(
            text=text ,add_special_tokens=add_special_tokens ,padding=padding ,truncation=truncation ,max_length=max_length ,stride=stride ,pad_to_multiple_of=pad_to_multiple_of ,return_attention_mask=return_attention_mask ,return_overflowing_tokens=return_overflowing_tokens ,return_special_tokens_mask=return_special_tokens_mask ,return_offsets_mapping=return_offsets_mapping ,return_token_type_ids=return_token_type_ids ,return_length=return_length ,verbose=verbose ,return_tensors=return_tensors ,**kwargs ,)
        encoding.update(text_encoding )
        qformer_text_encoding = self.qformer_tokenizer(
            text=text ,add_special_tokens=add_special_tokens ,padding=padding ,truncation=truncation ,max_length=max_length ,stride=stride ,pad_to_multiple_of=pad_to_multiple_of ,return_attention_mask=return_attention_mask ,return_overflowing_tokens=return_overflowing_tokens ,return_special_tokens_mask=return_special_tokens_mask ,return_offsets_mapping=return_offsets_mapping ,return_token_type_ids=return_token_type_ids ,return_length=return_length ,verbose=verbose ,return_tensors=return_tensors ,**kwargs ,)
        encoding["""qformer_input_ids"""] = qformer_text_encoding.pop("""input_ids""" )
        encoding["""qformer_attention_mask"""] = qformer_text_encoding.pop("""attention_mask""" )
    if images is not None:
        image_encoding = self.image_processor(images ,return_tensors=return_tensors )
        encoding.update(image_encoding )
    return encoding
def batch_decode( self ,*args ,**kwargs ):
    '''simple docstring'''
    return self.tokenizer.batch_decode(*args ,**kwargs )
def decode( self ,*args ,**kwargs ):
    '''simple docstring'''
    return self.tokenizer.decode(*args ,**kwargs )
@property
# Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
def model_input_names( self ):
    '''simple docstring'''
    tokenizer_input_names = self.tokenizer.model_input_names
    image_processor_input_names = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
def save_pretrained( self ,save_directory ,**kwargs ):
    '''simple docstring'''
    if os.path.isfile(save_directory ):
        raise ValueError(f"Provided path ({save_directory}) should be a directory, not a file" )
    os.makedirs(save_directory ,exist_ok=True )
    qformer_tokenizer_path = os.path.join(save_directory ,"""qformer_tokenizer""" )
    self.qformer_tokenizer.save_pretrained(qformer_tokenizer_path )
    return super().save_pretrained(save_directory ,**kwargs )
@classmethod
def from_pretrained( cls ,pretrained_model_name_or_path ,**kwargs ):
    '''simple docstring'''
    qformer_tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name_or_path ,subfolder="""qformer_tokenizer""" )
    args = cls._get_arguments_from_pretrained(pretrained_model_name_or_path ,**kwargs )
    args.append(qformer_tokenizer )
    return cls(*args )
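# --- Added usage sketch (not in the original file) ---
# How a processor of this shape is typically driven; this mirrors transformers'
# InstructBLIP processor, and the checkpoint and image path are assumptions.
from PIL import Image
from transformers import InstructBlipProcessor

processor = InstructBlipProcessor.from_pretrained('Salesforce/instructblip-vicuna-7b' )
inputs = processor(images=Image.open('photo.jpg' ) , text='What is unusual about this image?' , return_tensors='pt' )
print(sorted(inputs.keys() ) )
# ['attention_mask', 'input_ids', 'pixel_values', 'qformer_attention_mask', 'qformer_input_ids']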
| 336 | import math
from collections.abc import Callable
def intersection( function : Callable[[float], float] , x0 : float , x1 : float ):
    '''simple docstring'''
    x_n : float = x0
    x_n1 : float = x1
    while True:
        if x_n == x_n1 or function(x_n1 ) == function(x_n ):
            raise ZeroDivisionError('float division by zero, could not find root' )
        x_n2 : float = x_n1 - (
            function(x_n1 ) / ((function(x_n1 ) - function(x_n )) / (x_n1 - x_n ))
        )
        if abs(x_n2 - x_n1 ) < 10**-5:
            return x_n2
        x_n = x_n1
        x_n1 = x_n2
def f( x : float ):
    '''simple docstring'''
    return math.pow(x , 3 ) - (2 * x) - 5
if __name__ == "__main__":
print(intersection(f, 3, 3.5))
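# --- Added sanity check (not in the original file) ---
# The positive real root of f(x) = x^3 - 2x - 5 is about 2.0946, so the
# residual at the point returned by the secant iteration should be ~0.
root = intersection(f , 3 , 3.5 )
assert abs(f(root ) ) < 1e-3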
| 85 | 0 |
"""simple docstring"""
import math
def malus_law(initial_intensity ,angle ) -> float:
    '''simple docstring'''
    if initial_intensity < 0:
        raise ValueError("The value of intensity cannot be negative" )
    # handling of negative values of initial intensity
    if angle < 0 or angle > 360:
        raise ValueError("In Malus Law, the angle is in the range 0-360 degrees" )
    # handling of values out of allowed range
    return initial_intensity * (math.cos(math.radians(angle ) ) ** 2)
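# --- Added worked example (not in the original file) ---
# A quick check of the formula above: at a 60 degree polarizer angle,
# cos^2(60°) = 0.25, so a beam of intensity 100.0 should pass 25.0.
assert abs(malus_law(100.0 , 60 ) - 25.0 ) < 1e-9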
if __name__ == "__main__":
import doctest
doctest.testmod(name="malus_law") | 505 | from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class snake_case ( UpperCamelCase_ ):
lowercase_ = ['image_processor', 'tokenizer']
lowercase_ = 'AutoImageProcessor'
lowercase_ = 'AutoTokenizer'
def __init__( self , image_processor , tokenizer )-> None:
    """simple docstring"""
    super().__init__(image_processor , tokenizer )
    self.current_processor = self.image_processor
def __call__( self , text=None , images=None , return_tensors=None , **kwargs )-> Tuple:
    """simple docstring"""
    if text is None and images is None:
        raise ValueError('You have to specify either text or images. Both cannot be none.' )
    if text is not None:
        encoding = self.tokenizer(text , return_tensors=return_tensors , **kwargs )
    if images is not None:
        image_features = self.image_processor(images , return_tensors=return_tensors , **kwargs )
    if text is not None and images is not None:
        encoding['pixel_values'] = image_features.pixel_values
        return encoding
    elif text is not None:
        return encoding
    else:
        return BatchEncoding(data=dict(**image_features ) , tensor_type=return_tensors )
def batch_decode( self , *args , **kwargs )-> List[Any]:
    """simple docstring"""
    return self.tokenizer.batch_decode(*args , **kwargs )
def decode( self , *args , **kwargs )-> Dict:
    """simple docstring"""
    return self.tokenizer.decode(*args , **kwargs )
@property
def model_input_names( self )-> Any:
    """simple docstring"""
    return ["input_ids", "attention_mask", "pixel_values"]
| 85 | 0 |
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.ta.modeling_ta import TaBlock, TaConfig, TaLayerNorm
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class SpectrogramNotesEncoder( ModelMixin , ConfigMixin , ModuleUtilsMixin ):
    @register_to_config
    def __init__( self , max_length : int , vocab_size : int , d_model : int , dropout_rate : float , num_layers : int , num_heads : int , d_kv : int , d_ff : int , feed_forward_proj : str , is_decoder : bool = False , ):
        super().__init__()
        self.token_embedder = nn.Embedding(vocab_size , d_model )
        self.position_encoding = nn.Embedding(max_length , d_model )
        self.position_encoding.weight.requires_grad = False
        self.dropout_pre = nn.Dropout(p=dropout_rate )
        t5config = TaConfig(
            vocab_size=vocab_size , d_model=d_model , num_heads=num_heads , d_kv=d_kv , d_ff=d_ff , dropout_rate=dropout_rate , feed_forward_proj=feed_forward_proj , is_decoder=is_decoder , is_encoder_decoder=False , )
        self.encoders = nn.ModuleList()
        for lyr_num in range(num_layers ):
            lyr = TaBlock(t5config )
            self.encoders.append(lyr )
        self.layer_norm = TaLayerNorm(d_model )
        self.dropout_post = nn.Dropout(p=dropout_rate )
    def forward( self , encoder_input_tokens , encoder_inputs_mask ):
        x = self.token_embedder(encoder_input_tokens )
        seq_length = encoder_input_tokens.shape[1]
        inputs_positions = torch.arange(seq_length , device=encoder_input_tokens.device )
        x += self.position_encoding(inputs_positions )
        x = self.dropout_pre(x )
        # invert the attention mask for the T5 blocks
        input_shape = encoder_input_tokens.size()
        extended_attention_mask = self.get_extended_attention_mask(encoder_inputs_mask , input_shape )
        for lyr in self.encoders:
            x = lyr(x , extended_attention_mask )[0]
        x = self.layer_norm(x )
        return self.dropout_post(x ), encoder_inputs_mask
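# --- Added instantiation sketch (not in the original file) ---
# Runs a dummy batch through the encoder; the hyperparameters are illustrative
# assumptions and depend on the parameter names reconstructed above.
import torch

encoder = SpectrogramNotesEncoder(
    max_length=2_048 , vocab_size=1_536 , d_model=64 , dropout_rate=0.1 , num_layers=2 ,
    num_heads=2 , d_kv=16 , d_ff=128 , feed_forward_proj='gated-gelu' , )
tokens = torch.randint(0 , 1_536 , (1, 16) )
mask = torch.ones(1 , 16 , dtype=torch.long )
hidden, out_mask = encoder(encoder_input_tokens=tokens , encoder_inputs_mask=mask )
print(hidden.shape )  # torch.Size([1, 16, 64])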
| 364 | import math
import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute
def quantum_fourier_transform( number_of_qubits : int = 3 ):
    '''simple docstring'''
    if isinstance(number_of_qubits , str ):
        raise TypeError('number of qubits must be an integer.' )
    if number_of_qubits <= 0:
        raise ValueError('number of qubits must be > 0.' )
    if math.floor(number_of_qubits ) != number_of_qubits:
        raise ValueError('number of qubits must be exact integer.' )
    if number_of_qubits > 10:
        raise ValueError('number of qubits too large to simulate(>10).' )
    qr = QuantumRegister(number_of_qubits , 'qr' )
    cr = ClassicalRegister(number_of_qubits , 'cr' )
    quantum_circuit = QuantumCircuit(qr , cr )
    counter = number_of_qubits
    for i in range(counter ):
        quantum_circuit.h(number_of_qubits - i - 1 )
        counter -= 1
        for j in range(counter ):
            quantum_circuit.cp(np.pi / 2 ** (counter - j) , j , counter )
    for k in range(number_of_qubits // 2 ):
        quantum_circuit.swap(k , number_of_qubits - k - 1 )
    # measure all the qubits
    quantum_circuit.measure(qr , cr )
    # simulate with 10000 shots
    backend = Aer.get_backend('qasm_simulator' )
    job = execute(quantum_circuit , backend , shots=10_000 )
    return job.result().get_counts(quantum_circuit )
if __name__ == "__main__":
print(
F"""Total count for quantum fourier transform state is: \
{quantum_fourier_transform(3)}"""
)
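# --- Added sanity check (not in the original file) ---
# The QFT of |000> is a uniform superposition over the 8 basis states, so the
# measured counts should be roughly equal (about 1250 each for 10000 shots).
counts = quantum_fourier_transform(3 )
assert sum(counts.values() ) == 10_000
print(counts )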
| 85 | 0 |
'''simple docstring'''
def solution( max_base = 10 , max_power = 22 ):
    bases = range(1 , max_base )
    powers = range(1 , max_power )
    return sum(
        1 for power in powers for base in bases if len(str(base**power ) ) == power )
if __name__ == "__main__":
print(F"""{solution(10, 22) = }""")
| 116 | import logging
import numpy as np
import pytest
from scipy.linalg import eigh
logging.basicConfig(level=logging.INFO, format="%(message)s")
def column_reshape( input_array : np.ndarray ):
    '''simple docstring'''
    return input_array.reshape((input_array.size, 1) )
def covariance_within_classes( features : np.ndarray , labels : np.ndarray , classes : int ):
    '''simple docstring'''
    covariance_sum = np.nan
    for i in range(classes ):
        data = features[:, labels == i]
        data_mean = data.mean(1 )
        # Centralize the data of class i
        centered_data = data - column_reshape(data_mean )
        if i > 0:
            # If covariance_sum is not None
            covariance_sum += np.dot(centered_data , centered_data.T )
        else:
            # If covariance_sum is np.nan (i.e. first loop)
            covariance_sum = np.dot(centered_data , centered_data.T )
    return covariance_sum / features.shape[1]
def covariance_between_classes( features : np.ndarray , labels : np.ndarray , classes : int ):
    '''simple docstring'''
    general_data_mean = features.mean(1 )
    covariance_sum = np.nan
    for i in range(classes ):
        data = features[:, labels == i]
        device_data = data.shape[1]
        data_mean = data.mean(1 )
        if i > 0:
            # If covariance_sum is not None
            covariance_sum += device_data * np.dot(
                column_reshape(data_mean ) - column_reshape(general_data_mean ) , (column_reshape(data_mean ) - column_reshape(general_data_mean )).T , )
        else:
            # If covariance_sum is np.nan (i.e. first loop)
            covariance_sum = device_data * np.dot(
                column_reshape(data_mean ) - column_reshape(general_data_mean ) , (column_reshape(data_mean ) - column_reshape(general_data_mean )).T , )
    return covariance_sum / features.shape[1]
def principal_component_analysis( features : np.ndarray , dimensions : int ):
    '''simple docstring'''
    if features.any():
        data_mean = features.mean(1 )
        # Center the dataset
        centered_data = features - np.reshape(data_mean , (data_mean.size, 1) )
        covariance_matrix = np.dot(centered_data , centered_data.T ) / features.shape[1]
        eigenvalues, eigenvectors = np.linalg.eigh(covariance_matrix )
        # Take all the columns in the reverse order (-1), and then takes only the first
        filtered_eigenvectors = eigenvectors[:, ::-1][:, 0:dimensions]
        # Project the database on the new space
        projected_data = np.dot(filtered_eigenvectors.T , features )
        logging.info('Principal Component Analysis computed' )
        return projected_data
    else:
        logging.basicConfig(level=logging.ERROR , format='%(message)s' , force=True )
        logging.error('Dataset empty' )
        raise AssertionError
def linear_discriminant_analysis( features : np.ndarray , labels : np.ndarray , classes : int , dimensions : int ):
    '''simple docstring'''
    assert classes > dimensions
    # Check if features have been already loaded
    if features.any():
        _, eigenvectors = eigh(
            covariance_between_classes(features , labels , classes ) , covariance_within_classes(features , labels , classes ) , )
        filtered_eigenvectors = eigenvectors[:, ::-1][:, :dimensions]
        svd_matrix, _, _ = np.linalg.svd(filtered_eigenvectors )
        filtered_svd_matrix = svd_matrix[:, 0:dimensions]
        projected_data = np.dot(filtered_svd_matrix.T , features )
        logging.info('Linear Discriminant Analysis computed' )
        return projected_data
    else:
        logging.basicConfig(level=logging.ERROR , format='%(message)s' , force=True )
        logging.error('Dataset empty' )
        raise AssertionError
def test_linear_discriminant_analysis() -> None:
    '''simple docstring'''
    features = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]] )
    labels = np.array([0, 0, 0, 1, 1] )
    classes = 2
    dimensions = 2
    # Assert that the function raises an AssertionError if dimensions > classes
    with pytest.raises(AssertionError ) as error_info:
        projected_data = linear_discriminant_analysis(
            features , labels , classes , dimensions )
        if isinstance(projected_data , np.ndarray ):
            raise AssertionError(
                'Did not raise AssertionError for dimensions > classes' )
    assert error_info.type is AssertionError
def test_principal_component_analysis() -> None:
    '''simple docstring'''
    features = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]] )
    dimensions = 2
    expected_output = np.array([[6.92820323, 8.66025404, 10.39230485], [3.0, 3.0, 3.0]] )
    with pytest.raises(AssertionError ) as error_info:
        output = principal_component_analysis(features , dimensions )
        if not np.allclose(expected_output , output ):
            raise AssertionError
    assert error_info.type is AssertionError
if __name__ == "__main__":
import doctest
doctest.testmod()
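# --- Added usage example (not in the original file) ---
# Projects a tiny dataset (3 features x 5 samples; samples live in columns,
# as assumed by the functions above) onto its first 2 principal components.
features_demo = np.array(
    [[1.0, 2.0, 3.0, 4.0, 5.0], [2.0, 4.0, 6.0, 8.0, 10.0], [1.0, 1.0, 2.0, 2.0, 3.0]] )
projected_demo = principal_component_analysis(features_demo , 2 )
print(projected_demo.shape )  # (2, 5)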
| 85 | 0 |
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available
if is_fairscale_available():
from fairscale.optim import OSS
logger = logging.get_logger(__name__)
arg_to_scheduler = {
"linear": get_linear_schedule_with_warmup,
"cosine": get_cosine_schedule_with_warmup,
"cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup,
"polynomial": get_polynomial_decay_schedule_with_warmup,
"constant": get_constant_schedule,
"constant_w_warmup": get_constant_schedule_with_warmup,
}
class SCREAMING_SNAKE_CASE__ (UpperCamelCase_ ):
def __init__( self , config=None , data_args=None , *args , **kwargs ):
    """simple docstring"""
    super().__init__(*args , **kwargs )
if config is None:
assert isinstance(self.model , a_ ), (
"If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
F""" {self.model.__class__}"""
)
self.config = self.model.config
else:
self.config = config
self.data_args = data_args
self.vocab_size = self.config.tgt_vocab_size if isinstance(self.config , FSMTConfig ) else self.config.vocab_size
if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
assert self.config.pad_token_id is not None, (
"Make sure that `config.pad_token_id` is correcly defined when ignoring `pad_token` for loss"
" calculation or doing label smoothing."
)
if self.config.pad_token_id is None and self.config.eos_token_id is not None:
logger.warning(
F"""The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for"""
''' padding..''' )
if self.args.label_smoothing == 0:
    self.loss_fn = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id )
else:
    # dynamically import label_smoothed_nll_loss
    from utils import label_smoothed_nll_loss
    self.loss_fn = label_smoothed_nll_loss
def create_optimizer_and_scheduler( self , num_training_steps : int ):
    """simple docstring"""
    if self.optimizer is None:
        no_decay = ['bias', 'LayerNorm.weight']
        optimizer_grouped_parameters = [
            {
                'params': [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay )],
                'weight_decay': self.args.weight_decay,
            },
            {
                'params': [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay )],
                'weight_decay': 0.0,
            },
        ]
        if self.args.adafactor:
            optimizer_cls = Adafactor
            optimizer_kwargs = {'scale_parameter': False, 'relative_step': False}
        else:
            optimizer_cls = AdamW
            optimizer_kwargs = {
                'betas': (self.args.adam_beta1, self.args.adam_beta2),
                'eps': self.args.adam_epsilon,
            }
        optimizer_kwargs['lr'] = self.args.learning_rate
        if self.sharded_ddp:
            self.optimizer = OSS(
                params=optimizer_grouped_parameters , optim=optimizer_cls , **optimizer_kwargs , )
        else:
            self.optimizer = optimizer_cls(optimizer_grouped_parameters , **optimizer_kwargs )
    if self.lr_scheduler is None:
        self.lr_scheduler = self._get_lr_scheduler(num_training_steps )
    else:  # ignoring --lr_scheduler
        logger.warning('scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.' )
def _get_lr_scheduler( self , num_training_steps ):
    """simple docstring"""
    schedule_func = arg_to_scheduler[self.args.lr_scheduler]
    if self.args.lr_scheduler == "constant":
        scheduler = schedule_func(self.optimizer )
    elif self.args.lr_scheduler == "constant_w_warmup":
        scheduler = schedule_func(self.optimizer , num_warmup_steps=self.args.warmup_steps )
    else:
        scheduler = schedule_func(
            self.optimizer , num_warmup_steps=self.args.warmup_steps , num_training_steps=num_training_steps )
    return scheduler
def _get_train_sampler( self ):
"""simple docstring"""
if isinstance(self.train_dataset , torch.utils.data.IterableDataset ):
return None
elif is_torch_tpu_available():
return get_tpu_sampler(self.train_dataset )
else:
if self.args.sortish_sampler:
self.train_dataset.make_sortish_sampler(
self.args.per_device_train_batch_size , distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED) , )
return (
RandomSampler(self.train_dataset )
if self.args.local_rank == -1
else DistributedSampler(self.train_dataset )
)
def _compute_loss( self , model , inputs , labels ):
    """simple docstring"""
    if self.args.label_smoothing == 0:
        if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
            # force training to ignore pad token
            logits = model(**inputs , use_cache=False )[0]
            loss = self.loss_fn(logits.view(-1 , logits.shape[-1] ) , labels.view(-1 ) )
        else:
            # compute usual loss via models
            loss, logits = model(**inputs , labels=labels , use_cache=False )[:2]
    else:
        # compute label smoothed loss
        logits = model(**inputs , use_cache=False )[0]
        lprobs = torch.nn.functional.log_softmax(logits , dim=-1 )
        loss, _ = self.loss_fn(lprobs , labels , self.args.label_smoothing , ignore_index=self.config.pad_token_id )
    return loss, logits
def compute_loss( self , model , inputs ):
    """simple docstring"""
    labels = inputs.pop('''labels''' )
    loss, _ = self._compute_loss(model , inputs , labels )
    return loss
def prediction_step( self , model : nn.Module , inputs : Dict[str, Union[torch.Tensor, Any]] , prediction_loss_only : bool , ignore_keys : Optional[List[str]] = None , ):
    """simple docstring"""
    inputs = self._prepare_inputs(inputs )
    gen_kwargs = {
        'max_length': self.data_args.val_max_target_length
        if self.data_args is not None
        else self.config.max_length,
        'num_beams': self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
    }
    if self.args.predict_with_generate and not self.args.prediction_loss_only:
        generated_tokens = self.model.generate(
            inputs['input_ids'] , attention_mask=inputs['attention_mask'] , **gen_kwargs , )
        # in case the batch is shorter than max length, the output should be padded
        if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
            generated_tokens = self._pad_tensors_to_max_len(generated_tokens , gen_kwargs['max_length'] )
    labels = inputs.pop('labels' )
    with torch.no_grad():
        # compute loss on predict data
        loss, logits = self._compute_loss(model , inputs , labels )
    loss = loss.mean().detach()
    if self.args.prediction_loss_only:
        return (loss, None, None)
    logits = generated_tokens if self.args.predict_with_generate else logits
    if labels.shape[-1] < gen_kwargs["max_length"]:
        labels = self._pad_tensors_to_max_len(labels , gen_kwargs['max_length'] )
    return (loss, logits, labels)
def _pad_tensors_to_max_len( self , tensor , max_length ):
    """simple docstring"""
    pad_token_id = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id
    if pad_token_id is None:
        raise ValueError(
            '''Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be'''
            f""" padded to `max_length`={max_length}""" )
    padded_tensor = pad_token_id * torch.ones(
        (tensor.shape[0], max_length) , dtype=tensor.dtype , device=tensor.device )
    padded_tensor[:, : tensor.shape[-1]] = tensor
    return padded_tensor
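# --- Added illustration (not in the original file) ---
# What `_pad_tensors_to_max_len` does in isolation: right-pad a batch of token
# ids up to a fixed length with the pad id. All values here are illustrative.
import torch

pad_token_id = 0
tensor = torch.tensor([[5, 6, 7]] )
max_length = 5
padded = pad_token_id * torch.ones((tensor.shape[0], max_length) , dtype=tensor.dtype )
padded[:, : tensor.shape[-1]] = tensor
print(padded )  # tensor([[5, 6, 7, 0, 0]])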
| 615 | import argparse
import logging
from collections import namedtuple
import torch
from model_bertabs import BertAbsSummarizer
from models.model_builder import AbsSummarizer # The authors' implementation
from transformers import BertTokenizer
logging.basicConfig(level=logging.INFO)
SCREAMING_SNAKE_CASE__ : Optional[int] = logging.getLogger(__name__)
SCREAMING_SNAKE_CASE__ : List[Any] = "Hello world! cécé herlolip"
SCREAMING_SNAKE_CASE__ : Dict = namedtuple(
"BertAbsConfig",
[
"temp_dir",
"large",
"use_bert_emb",
"finetune_bert",
"encoder",
"share_emb",
"max_pos",
"enc_layers",
"enc_hidden_size",
"enc_heads",
"enc_ff_size",
"enc_dropout",
"dec_layers",
"dec_hidden_size",
"dec_heads",
"dec_ff_size",
"dec_dropout",
],
)
def convert_bertabs_checkpoints( path , pytorch_dump_folder_path ):
    '''simple docstring'''
    config = BertAbsConfig(
        temp_dir='.' , finetune_bert=False , large=False , share_emb=True , use_bert_emb=False , encoder='bert' , max_pos=5_12 , enc_layers=6 , enc_hidden_size=5_12 , enc_heads=8 , enc_ff_size=5_12 , enc_dropout=0.2 , dec_layers=6 , dec_hidden_size=7_68 , dec_heads=8 , dec_ff_size=20_48 , dec_dropout=0.2 , )
    checkpoints = torch.load(path , lambda storage , loc : storage )
    original = AbsSummarizer(config , torch.device('cpu' ) , checkpoints )
    original.eval()
    new_model = BertAbsSummarizer(config , torch.device('cpu' ) )
    new_model.eval()
# -------------------
# Convert the weights
# -------------------
logging.info('convert the model' )
new_model.bert.load_state_dict(original.bert.state_dict() )
new_model.decoder.load_state_dict(original.decoder.state_dict() )
new_model.generator.load_state_dict(original.generator.state_dict() )
# ----------------------------------
# Make sure the outputs are identical
# ----------------------------------
logging.info('Make sure that the models\' outputs are identical' )
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased' )
# prepare the model inputs
encoder_input_ids = tokenizer.encode('This is sample éàalj\'-.' )
encoder_input_ids.extend([tokenizer.pad_token_id] * (5_12 - len(encoder_input_ids )) )
encoder_input_ids = torch.tensor(encoder_input_ids ).unsqueeze(0 )
decoder_input_ids = tokenizer.encode('This is sample 3 éàalj\'-.' )
decoder_input_ids.extend([tokenizer.pad_token_id] * (5_12 - len(decoder_input_ids )) )
decoder_input_ids = torch.tensor(decoder_input_ids ).unsqueeze(0 )
# failsafe to make sure the weights reset does not affect the
# loaded weights.
assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight ) ) == 0
# forward pass
# forward pass
src = encoder_input_ids
tgt = decoder_input_ids
segs = token_type_ids = None
clss = None
mask_src = encoder_attention_mask = None
mask_tgt = decoder_attention_mask = None
mask_cls = None
# The original model does not apply the generator layer immediately but rather in
# the beam search (where it combines softmax + linear layer). Since we already
# apply the softmax in our generation process we only apply the linear layer here.
# We make sure that the outputs of the full stack are identical
output_original_model = original(src , tgt , segs , clss , mask_src , mask_tgt , mask_cls )[0]
output_original_generator = original.generator(output_original_model )
output_converted_model = new_model(
    encoder_input_ids , decoder_input_ids , token_type_ids , encoder_attention_mask , decoder_attention_mask )[0]
output_converted_generator = new_model.generator(output_converted_model )
maximum_absolute_difference = torch.max(torch.abs(output_converted_model - output_original_model ) ).item()
print('Maximum absolute difference between weights: {:.2f}'.format(maximum_absolute_difference ) )
maximum_absolute_difference = torch.max(torch.abs(output_converted_generator - output_original_generator ) ).item()
print('Maximum absolute difference between weights: {:.2f}'.format(maximum_absolute_difference ) )
are_identical = torch.allclose(output_converted_generator , output_original_generator , atol=1E-3 )
if are_identical:
logging.info('all weights are equal up to 1e-3' )
else:
raise ValueError('the weights are different. The new model is likely different from the original one.' )
# The model has been saved with torch.save(model) and this is bound to the exact
# directory structure. We save the state_dict instead.
logging.info('saving the model\'s state dictionary' )
torch.save(
new_model.state_dict() , './bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin' )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--bertabs_checkpoint_path",
default=None,
type=str,
required=True,
help="Path the official PyTorch dump.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
required=True,
help="Path to the output PyTorch model.",
)
args = parser.parse_args()
convert_bertabs_checkpoints(
args.bertabs_checkpoint_path,
args.pytorch_dump_folder_path,
)
| 85 | 0 |
"""simple docstring"""
import unittest
from transformers import MPNetConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MPNetForMaskedLM,
MPNetForMultipleChoice,
MPNetForQuestionAnswering,
MPNetForSequenceClassification,
MPNetForTokenClassification,
MPNetModel,
)
class lowercase_ :
'''simple docstring'''
def __init__( self : int , _UpperCAmelCase : int , _UpperCAmelCase : Union[str, Any]=13 , _UpperCAmelCase : List[str]=7 , _UpperCAmelCase : int=True , _UpperCAmelCase : List[str]=True , _UpperCAmelCase : List[Any]=False , _UpperCAmelCase : Optional[Any]=True , _UpperCAmelCase : Optional[Any]=99 , _UpperCAmelCase : List[Any]=64 , _UpperCAmelCase : Optional[int]=5 , _UpperCAmelCase : str=4 , _UpperCAmelCase : List[Any]=64 , _UpperCAmelCase : int="gelu" , _UpperCAmelCase : List[str]=0.1 , _UpperCAmelCase : List[Any]=0.1 , _UpperCAmelCase : Optional[Any]=512 , _UpperCAmelCase : Union[str, Any]=16 , _UpperCAmelCase : int=2 , _UpperCAmelCase : Dict=0.02 , _UpperCAmelCase : Optional[Any]=3 , _UpperCAmelCase : List[Any]=4 , _UpperCAmelCase : Optional[int]=None , ):
_A = parent
_A = batch_size
_A = seq_length
_A = is_training
_A = use_input_mask
_A = use_token_type_ids
_A = use_labels
_A = vocab_size
_A = hidden_size
_A = num_hidden_layers
_A = num_attention_heads
_A = intermediate_size
_A = hidden_act
_A = hidden_dropout_prob
_A = attention_probs_dropout_prob
_A = max_position_embeddings
_A = type_vocab_size
_A = type_sequence_label_size
_A = initializer_range
_A = num_labels
_A = num_choices
_A = scope
def lowerCAmelCase_ ( self : Dict ):
return MPNetConfig.from_pretrained('microsoft/mpnet-base' )
def lowerCAmelCase_ ( self : Any ):
_A = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_A = None
if self.use_input_mask:
_A = random_attention_mask([self.batch_size, self.seq_length] )
_A = None
_A = None
_A = None
if self.use_labels:
_A = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_A = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_A = ids_tensor([self.batch_size] , self.num_choices )
_A = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCAmelCase_ ( self : Optional[int] ):
return MPNetConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
def lowerCAmelCase_ ( self : int , _UpperCAmelCase : Any , _UpperCAmelCase : Dict , _UpperCAmelCase : Dict , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Optional[int] ):
_A = MPNetModel(config=a_ )
model.to(a_ )
model.eval()
_A = model(a_ , a_ )
_A = model(a_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def lowerCAmelCase_ ( self : Tuple , _UpperCAmelCase : int , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : List[str] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : int ):
_A = MPNetForQuestionAnswering(config=a_ )
model.to(a_ )
model.eval()
_A = model(
a_ , attention_mask=a_ , start_positions=a_ , end_positions=a_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowerCAmelCase_ ( self : str , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Dict , _UpperCAmelCase : int , _UpperCAmelCase : List[Any] , _UpperCAmelCase : List[str] ):
_A = self.num_labels
_A = MPNetForSequenceClassification(a_ )
model.to(a_ )
model.eval()
_A = model(a_ , attention_mask=a_ , labels=a_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCAmelCase_ ( self : Optional[Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Dict , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : int ):
_A = self.num_choices
_A = MPNetForMultipleChoice(config=a_ )
model.to(a_ )
model.eval()
_A = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_A = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_A = model(
a_ , attention_mask=a_ , labels=a_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowerCAmelCase_ ( self : int , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Any , _UpperCAmelCase : int , _UpperCAmelCase : List[str] , _UpperCAmelCase : Any ):
_A = self.num_labels
_A = MPNetForTokenClassification(config=a_ )
model.to(a_ )
model.eval()
_A = model(a_ , attention_mask=a_ , labels=a_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class lowercase_ ( UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase : Any = (
(
MPNetForMaskedLM,
MPNetForMultipleChoice,
MPNetForQuestionAnswering,
MPNetForSequenceClassification,
MPNetForTokenClassification,
MPNetModel,
)
if is_torch_available()
else ()
)
UpperCAmelCase : str = (
{
'''feature-extraction''': MPNetModel,
'''fill-mask''': MPNetForMaskedLM,
'''question-answering''': MPNetForQuestionAnswering,
'''text-classification''': MPNetForSequenceClassification,
'''token-classification''': MPNetForTokenClassification,
'''zero-shot''': MPNetForSequenceClassification,
}
if is_torch_available()
else {}
)
UpperCAmelCase : Optional[int] = False
UpperCAmelCase : List[str] = True
def lowerCAmelCase_ ( self : Optional[Any] ):
_A = MPNetModelTester(self )
_A = ConfigTester(self , config_class=a_ , hidden_size=37 )
def lowerCAmelCase_ ( self : Optional[int] ):
self.config_tester.run_common_tests()
def lowerCAmelCase_ ( self : int ):
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_model(*a_ )
def lowerCAmelCase_ ( self : Dict ):
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_sequence_classification(*a_ )
def lowerCAmelCase_ ( self : str ):
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_multiple_choice(*a_ )
def lowerCAmelCase_ ( self : List[str] ):
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_token_classification(*a_ )
def lowerCAmelCase_ ( self : Any ):
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_question_answering(*a_ )
@require_torch
class lowercase_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def lowerCAmelCase_ ( self : str ):
_A = MPNetModel.from_pretrained('microsoft/mpnet-base' )
_A = torch.tensor([[0, 345, 232, 328, 740, 140, 1_695, 69, 6_078, 1_588, 2]] )
_A = model(a_ )[0]
_A = torch.Size((1, 11, 768) )
self.assertEqual(output.shape , a_ )
_A = torch.tensor(
[[[-0.0550, 0.1943, -0.0740], [-0.0562, 0.2211, -0.0579], [-0.0437, 0.3337, -0.0641]]] )
# compare the actual values for a slice.
self.assertTrue(torch.allclose(output[:, :3, :3] , a_ , atol=1E-4 ) )
| 7 | from __future__ import annotations
import inspect
import unittest
from typing import List, Tuple
from transformers import RegNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFRegNetForImageClassification, TFRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class snake_case :
def __init__( self : Tuple , a_ : int , a_ : Optional[int]=3 , a_ : Tuple=32 , a_ : Any=3 , a_ : Tuple=10 , a_ : Optional[int]=[10, 20, 30, 40] , a_ : List[Any]=[1, 1, 2, 1] , a_ : int=True , a_ : Optional[Any]=True , a_ : Any="relu" , a_ : int=3 , a_ : List[Any]=None , )-> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = parent
SCREAMING_SNAKE_CASE__ : Optional[int] = batch_size
SCREAMING_SNAKE_CASE__ : int = image_size
SCREAMING_SNAKE_CASE__ : Tuple = num_channels
SCREAMING_SNAKE_CASE__ : Tuple = embeddings_size
SCREAMING_SNAKE_CASE__ : str = hidden_sizes
SCREAMING_SNAKE_CASE__ : Optional[int] = depths
SCREAMING_SNAKE_CASE__ : Optional[Any] = is_training
SCREAMING_SNAKE_CASE__ : Union[str, Any] = use_labels
SCREAMING_SNAKE_CASE__ : Dict = hidden_act
SCREAMING_SNAKE_CASE__ : Tuple = num_labels
SCREAMING_SNAKE_CASE__ : List[Any] = scope
SCREAMING_SNAKE_CASE__ : str = len(a_ )
def __lowercase( self : Union[str, Any] )-> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE__ : Any = None
if self.use_labels:
SCREAMING_SNAKE_CASE__ : Any = ids_tensor([self.batch_size] , self.num_labels )
SCREAMING_SNAKE_CASE__ : Tuple = self.get_config()
return config, pixel_values, labels
def __lowercase( self : str )-> str:
"""simple docstring"""
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , )
def __lowercase( self : List[str] , a_ : int , a_ : Any , a_ : Optional[Any] )-> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = TFRegNetModel(config=a_ )
SCREAMING_SNAKE_CASE__ : Optional[Any] = model(a_ , training=a_ )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def __lowercase( self : Union[str, Any] , a_ : Dict , a_ : int , a_ : Optional[Any] )-> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = self.num_labels
SCREAMING_SNAKE_CASE__ : Tuple = TFRegNetForImageClassification(a_ )
SCREAMING_SNAKE_CASE__ : List[Any] = model(a_ , labels=a_ , training=a_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def prepare_config_and_inputs_for_common( self )-> Tuple:
    """simple docstring"""
    config_and_inputs = self.prepare_config_and_inputs()
    config, pixel_values, labels = config_and_inputs
    inputs_dict = {'pixel_values': pixel_values}
    return config, inputs_dict
@require_tf
class snake_case ( UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ):
lowercase_ = (TFRegNetModel, TFRegNetForImageClassification) if is_tf_available() else ()
lowercase_ = (
{'feature-extraction': TFRegNetModel, 'image-classification': TFRegNetForImageClassification}
if is_tf_available()
else {}
)
lowercase_ = False
lowercase_ = False
lowercase_ = False
lowercase_ = False
lowercase_ = False
def __lowercase( self : int )-> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = TFRegNetModelTester(self )
SCREAMING_SNAKE_CASE__ : int = ConfigTester(self , config_class=a_ , has_text_modality=a_ )
def __lowercase( self : List[Any] )-> Tuple:
"""simple docstring"""
return
@unittest.skip(reason='RegNet does not use inputs_embeds' )
def __lowercase( self : str )-> Optional[int]:
"""simple docstring"""
pass
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices('GPU' ) ) == 0 , reason='TF does not support backprop for grouped convolutions on CPU.' , )
@slow
def __lowercase( self : Any )-> List[Any]:
"""simple docstring"""
super().test_keras_fit()
@unittest.skip(reason='RegNet does not support input and output embeddings' )
def __lowercase( self : Any )-> List[Any]:
"""simple docstring"""
pass
def __lowercase( self : Tuple )-> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ : Optional[int] = model_class(a_ )
SCREAMING_SNAKE_CASE__ : Optional[Any] = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE__ : List[Any] = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE__ : Optional[int] = ['pixel_values']
self.assertListEqual(arg_names[:1] , a_ )
def __lowercase( self : str )-> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a_ )
def __lowercase( self : List[Any] )-> Optional[Any]:
"""simple docstring"""
def check_hidden_states_output(a_ : int , a_ : Union[str, Any] , a_ : Tuple ):
SCREAMING_SNAKE_CASE__ : Any = model_class(a_ )
SCREAMING_SNAKE_CASE__ : Optional[Any] = model(**self._prepare_for_class(a_ , a_ ) , training=a_ )
SCREAMING_SNAKE_CASE__ : List[Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.model_tester.num_stages
self.assertEqual(len(a_ ) , expected_num_stages + 1 )
# RegNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : int = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE__ : Dict = ['basic', 'bottleneck']
for model_class in self.all_model_classes:
for layer_type in layers_type:
SCREAMING_SNAKE_CASE__ : List[Any] = layer_type
SCREAMING_SNAKE_CASE__ : Union[str, Any] = True
check_hidden_states_output(a_ , a_ , a_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE__ : int = True
check_hidden_states_output(a_ , a_ , a_ )
def __lowercase( self : Optional[int] )-> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : str = self.model_tester.prepare_config_and_inputs_for_common()
def check_equivalence(a_ : str , a_ : Tuple , a_ : Optional[int] , a_ : Union[str, Any]={} ):
SCREAMING_SNAKE_CASE__ : int = model(a_ , return_dict=a_ , **a_ )
SCREAMING_SNAKE_CASE__ : str = model(a_ , return_dict=a_ , **a_ ).to_tuple()
def recursive_check(a_ : List[Any] , a_ : int ):
if isinstance(a_ , (List, Tuple) ):
for tuple_iterable_value, dict_iterable_value in zip(a_ , a_ ):
recursive_check(a_ , a_ )
elif tuple_object is None:
return
else:
self.assertTrue(
all(tf.equal(a_ , a_ ) ) , msg=(
'Tuple and dict output are not equal. Difference:'
F''' {tf.math.reduce_max(tf.abs(tuple_object - dict_object ) )}'''
) , )
recursive_check(a_ , a_ )
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ : Optional[int] = model_class(a_ )
SCREAMING_SNAKE_CASE__ : int = self._prepare_for_class(a_ , a_ )
SCREAMING_SNAKE_CASE__ : Dict = self._prepare_for_class(a_ , a_ )
check_equivalence(a_ , a_ , a_ )
SCREAMING_SNAKE_CASE__ : List[str] = self._prepare_for_class(a_ , a_ , return_labels=a_ )
SCREAMING_SNAKE_CASE__ : Optional[int] = self._prepare_for_class(a_ , a_ , return_labels=a_ )
check_equivalence(a_ , a_ , a_ )
SCREAMING_SNAKE_CASE__ : str = self._prepare_for_class(a_ , a_ )
SCREAMING_SNAKE_CASE__ : List[str] = self._prepare_for_class(a_ , a_ )
check_equivalence(a_ , a_ , a_ , {'output_hidden_states': True} )
SCREAMING_SNAKE_CASE__ : int = self._prepare_for_class(a_ , a_ , return_labels=a_ )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self._prepare_for_class(a_ , a_ , return_labels=a_ )
check_equivalence(a_ , a_ , a_ , {'output_hidden_states': True} )
def __lowercase( self : str )-> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*a_ )
@slow
def __lowercase( self : Any )-> List[str]:
"""simple docstring"""
for model_name in TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE__ : Optional[int] = TFRegNetModel.from_pretrained(a_ )
self.assertIsNotNone(a_ )
def _a ( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Dict = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_tf
@require_vision
class snake_case ( unittest.TestCase ):
@cached_property
def __lowercase( self : List[Any] )-> int:
"""simple docstring"""
return (
AutoImageProcessor.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def __lowercase( self : Any )-> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = TFRegNetForImageClassification.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
SCREAMING_SNAKE_CASE__ : List[Any] = self.default_image_processor
SCREAMING_SNAKE_CASE__ : Any = prepare_img()
SCREAMING_SNAKE_CASE__ : str = image_processor(images=a_ , return_tensors='tf' )
# forward pass
SCREAMING_SNAKE_CASE__ : Tuple = model(**a_ , training=a_ )
# verify the logits
SCREAMING_SNAKE_CASE__ : Optional[int] = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , a_ )
SCREAMING_SNAKE_CASE__ : Any = tf.constant([-0.4180, -1.5051, -3.4836] )
tf.debugging.assert_near(outputs.logits[0, :3] , a_ , atol=1e-4 )
| 85 | 0 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
DETA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "ut/deta": "https://huggingface.co/ut/deta/resolve/main/config.json",
}
class DetaConfig(PretrainedConfig ):
    """simple docstring"""
    model_type = "deta"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }
    def __init__( self, backbone_config=None, num_queries=9_0_0, max_position_embeddings=2_0_4_8, encoder_layers=6, encoder_ffn_dim=2_0_4_8, encoder_attention_heads=8, decoder_layers=6, decoder_ffn_dim=1_0_2_4, decoder_attention_heads=8, encoder_layerdrop=0.0, is_encoder_decoder=True, activation_function="relu", d_model=2_5_6, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, init_xavier_std=1.0, return_intermediate=True, auxiliary_loss=False, position_embedding_type="sine", num_feature_levels=5, encoder_n_points=4, decoder_n_points=4, two_stage=True, two_stage_num_proposals=3_0_0, with_box_refine=True, assign_first_stage=True, class_cost=1, bbox_cost=5, giou_cost=2, mask_loss_coefficient=1, dice_loss_coefficient=1, bbox_loss_coefficient=5, giou_loss_coefficient=2, eos_coefficient=0.1, focal_alpha=0.25, **kwargs, ) -> None:
        """simple docstring"""
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
            backbone_config = CONFIG_MAPPING['resnet'](out_features=["stage2", "stage3", "stage4"] )
        else:
            if isinstance(backbone_config, dict ):
                backbone_model_type = backbone_config.pop("model_type" )
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config )
        self.backbone_config = backbone_config
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        self.assign_first_stage = assign_first_stage
        if two_stage is True and with_box_refine is False:
            raise ValueError("If two_stage is True, with_box_refine must be True." )
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs )
    @property
    def num_attention_heads( self ) -> int:
        """simple docstring"""
        return self.encoder_attention_heads
    @property
    def hidden_size( self ) -> int:
        """simple docstring"""
        return self.d_model
    def to_dict( self ) -> dict:
        """simple docstring"""
        output = copy.deepcopy(self.__dict__ )
        output['backbone_config'] = self.backbone_config.to_dict()
        output['model_type'] = self.__class__.model_type
        return output
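# --- Added usage sketch (not in the original file) ---
# Constructing the config with its defaults and reading the aliased
# attributes; assumes the reconstructed `__init__` above.
config = DetaConfig()
print(config.hidden_size , config.num_attention_heads )  # 256 8
print(config.to_dict()['model_type'] )  # 'deta'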
| 663 | from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE__ : Optional[Any] = {"configuration_fnet": ["FNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FNetConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ : List[Any] = ["FNetTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ : List[str] = ["FNetTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_fnet"] = [
"FNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"FNetForMaskedLM",
"FNetForMultipleChoice",
"FNetForNextSentencePrediction",
"FNetForPreTraining",
"FNetForQuestionAnswering",
"FNetForSequenceClassification",
"FNetForTokenClassification",
"FNetLayer",
"FNetModel",
"FNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet import FNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet_fast import FNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_fnet import (
FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FNetForMaskedLM,
FNetForMultipleChoice,
FNetForNextSentencePrediction,
FNetForPreTraining,
FNetForQuestionAnswering,
FNetForSequenceClassification,
FNetForTokenClassification,
FNetLayer,
FNetModel,
FNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
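# The assignment above swaps the module object in sys.modules for a
# _LazyModule, so submodules are only imported on first attribute access.
# A minimal stand-alone sketch of the same idea via PEP 562's module-level
# __getattr__ (illustrative only; the name map below is ours, not part of
# transformers):
#
#     import importlib
#     _lazy_map = {"FNetConfig": "configuration_fnet"}
#     def __getattr__(name):
#         if name in _lazy_map:
#             submodule = importlib.import_module("." + _lazy_map[name], __name__)
#             return getattr(submodule, name)
#         raise AttributeError(f"module {__name__!r} has no attribute {name!r}")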
| 85 | 0 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class DPMSolverSDEScheduler( metaclass=DummyObject ):
    _backends = ["torch", "torchsde"]
    def __init__( self , *args , **kwargs ):
        requires_backends(self , ['torch', 'torchsde'] )
    @classmethod
    def from_config( cls , *args , **kwargs ):
        requires_backends(cls , ['torch', 'torchsde'] )
    @classmethod
    def from_pretrained( cls , *args , **kwargs ):
        requires_backends(cls , ['torch', 'torchsde'] )
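# Stand-alone sketch of the dummy-object pattern used above: when an optional
# backend is missing, the placeholder raises a clear error at instantiation
# time instead of an ImportError at import time. The class name here is ours.
class _RequiresTorchSde:
    def __init__(self, *args, **kwargs):
        raise ImportError("This object requires `torch` and `torchsde`; please install them.")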
| 549 | def naive_cut_rod_recursive(n: int, prices: list):
    '''simple docstring'''
    _enforce_args(n, prices)
    if n == 0:
        return 0
    max_revenue = float('-inf')
    for i in range(1, n + 1):
        max_revenue = max(
            max_revenue, prices[i - 1] + naive_cut_rod_recursive(n - i, prices) )
    return max_revenue
def top_down_cut_rod(n: int, prices: list):
    '''simple docstring'''
    _enforce_args(n, prices)
    max_rev = [float('-inf') for _ in range(n + 1)]
    return _top_down_cut_rod_recursive(n, prices, max_rev)
def _top_down_cut_rod_recursive(n: int, prices: list, max_rev: list):
    '''simple docstring'''
    if max_rev[n] >= 0:
        return max_rev[n]
    elif n == 0:
        return 0
    else:
        max_revenue = float('-inf')
        for i in range(1, n + 1):
            max_revenue = max(
                max_revenue, prices[i - 1] + _top_down_cut_rod_recursive(n - i, prices, max_rev), )
        max_rev[n] = max_revenue
    return max_rev[n]
def bottom_up_cut_rod(n: int, prices: list):
    '''simple docstring'''
    _enforce_args(n, prices)
    # length(max_rev) = n + 1, to accommodate for the revenue obtainable from a rod of
    # length 0.
    max_rev = [float('-inf') for _ in range(n + 1)]
    max_rev[0] = 0
    for i in range(1, n + 1):
        max_revenue_i = max_rev[i]
        for j in range(1, i + 1):
            max_revenue_i = max(max_revenue_i, prices[j - 1] + max_rev[i - j])
        max_rev[i] = max_revenue_i
    return max_rev[n]
def _enforce_args(n: int, prices: list):
    '''simple docstring'''
    if n < 0:
        msg = f'''n must be greater than or equal to 0. Got n = {n}'''
        raise ValueError(msg)
    if n > len(prices):
        msg = (
            'Each integral piece of rod must have a corresponding price. '
            f'''Got n = {n} but length of prices = {len(prices)}'''
        )
        raise ValueError(msg)
def main():
    '''simple docstring'''
    prices = [6, 10, 12, 15, 20, 23]
    n = len(prices)
    # the best revenue comes from cutting the rod into 6 pieces, each
    # of length 1 resulting in a revenue of 6 * 6 = 36.
    expected_max_revenue = 36
    max_rev_top_down = top_down_cut_rod(n, prices)
    max_rev_bottom_up = bottom_up_cut_rod(n, prices)
    max_rev_naive = naive_cut_rod_recursive(n, prices)
    assert expected_max_revenue == max_rev_top_down
    assert max_rev_top_down == max_rev_bottom_up
    assert max_rev_bottom_up == max_rev_naive
if __name__ == "__main__":
    main()
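# A quick cross-check of the three implementations above on a second price
# table — a minimal sketch; `demo_rod_cutting` is our name, not part of the
# original module.
def demo_rod_cutting() -> None:
    prices = [1, 5, 8, 9]  # revenue for pieces of length 1..4
    # Best plan for a rod of length 4 is two pieces of length 2: 5 + 5 = 10.
    assert naive_cut_rod_recursive(4, prices) == 10
    assert top_down_cut_rod(4, prices) == 10
    assert bottom_up_cut_rod(4, prices) == 10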
| 85 | 0 |
"""simple docstring"""
def excel_title_to_column(column_title: str) -> int:
    assert column_title.isupper()
    answer = 0
    index = len(column_title) - 1
    power = 0
    while index >= 0:
        value = (ord(column_title[index]) - 64) * pow(26, power)
        answer += value
        power += 1
        index -= 1
    return answer
if __name__ == "__main__":
from doctest import testmod
testmod() | 498 | import unittest
from transformers import CamembertTokenizer, CamembertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
SAMPLE_BPE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_bpe.model")
FRAMEWORK = "pt" if is_torch_available() else "tf"
@require_sentencepiece
@require_tokenizers
class snake_case ( UpperCamelCase_ , unittest.TestCase ):
lowercase_ = CamembertTokenizer
lowercase_ = CamembertTokenizerFast
lowercase_ = True
lowercase_ = True
def __lowercase( self : Tuple )-> str:
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
        tokenizer = CamembertTokenizer(SAMPLE_VOCAB )
tokenizer.save_pretrained(self.tmpdirname )
def __lowercase( self : Any )-> Dict:
"""simple docstring"""
        token = '<pad>'
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )
def __lowercase( self : Optional[Any] )-> Any:
"""simple docstring"""
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , '<s>NOTUSED' )
        self.assertEqual(vocab_keys[1] , '<pad>' )
        self.assertEqual(vocab_keys[-1] , '<mask>' )
        self.assertEqual(len(vocab_keys ) , 1004 )
def __lowercase( self : Union[str, Any] )-> Optional[Any]:
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 1005 )
def __lowercase( self : List[Any] )-> Optional[int]:
"""simple docstring"""
        tokenizer = CamembertTokenizer(SAMPLE_BPE_VOCAB )
        tokenizer.save_pretrained(self.tmpdirname )
        rust_tokenizer = CamembertTokenizerFast.from_pretrained(self.tmpdirname )
        sequence = 'I was born in 92000, and this is falsé.'
        ids = tokenizer.encode(sequence )
        rust_ids = rust_tokenizer.encode(sequence )
        self.assertListEqual(ids , rust_ids )
        ids = tokenizer.encode(sequence , add_special_tokens=False )
        rust_ids = rust_tokenizer.encode(sequence , add_special_tokens=False )
        self.assertListEqual(ids , rust_ids )
        # <unk> tokens are not the same for `rust` than for `slow`.
        # Because spm gives back raw token instead of `unk` in EncodeAsPieces
        # tokens = tokenizer.tokenize(sequence)
        tokens = tokenizer.convert_ids_to_tokens(ids )
        rust_tokens = rust_tokenizer.tokenize(sequence )
        self.assertListEqual(tokens , rust_tokens )
def __lowercase( self : Union[str, Any] )-> str:
"""simple docstring"""
if not self.test_rust_tokenizer:
return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = 'I was born in 92000, and this is falsé.'
        tokens = tokenizer.tokenize(sequence )
        rust_tokens = rust_tokenizer.tokenize(sequence )
        self.assertListEqual(tokens , rust_tokens )
        ids = tokenizer.encode(sequence , add_special_tokens=False )
        rust_ids = rust_tokenizer.encode(sequence , add_special_tokens=False )
        self.assertListEqual(ids , rust_ids )
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence )
        rust_ids = rust_tokenizer.encode(sequence )
        self.assertListEqual(ids , rust_ids )
@slow
def __lowercase( self : List[str] )-> Dict:
"""simple docstring"""
# fmt: off
SCREAMING_SNAKE_CASE__ : Union[str, Any] = {'input_ids': [[5, 54, 7196, 297, 30, 23, 776, 18, 11, 3215, 3705, 8252, 22, 3164, 1181, 2116, 29, 16, 813, 25, 791, 3314, 20, 3446, 38, 2_7575, 120, 6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [5, 468, 17, 11, 9088, 20, 1517, 8, 2_2804, 1_8818, 10, 38, 629, 607, 607, 142, 19, 7196, 867, 56, 1_0326, 24, 2267, 20, 416, 5072, 1_5612, 233, 734, 7, 2399, 27, 16, 3015, 1649, 7, 24, 20, 4338, 2399, 27, 13, 3400, 14, 13, 6189, 8, 930, 9, 6]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# camembert is a french model. So we also use french texts.
SCREAMING_SNAKE_CASE__ : str = [
'Le transformeur est un modèle d\'apprentissage profond introduit en 2017, '
'utilisé principalement dans le domaine du traitement automatique des langues (TAL).',
'À l\'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus '
'pour gérer des données séquentielles, telles que le langage naturel, pour des tâches '
'telles que la traduction et la synthèse de texte.',
]
self.tokenizer_integration_test_util(
expected_encoding=a_ , model_name='camembert-base' , revision='3a0641d9a1aeb7e848a74299e7e4c4bca216b4cf' , sequences=a_ , )
| 85 | 0 |
'''simple docstring'''
import functools
from typing import Any
def word_break(string: str, words: list[str]) -> bool:
    if not isinstance(string, str) or len(string) == 0:
        raise ValueError('the string should be not empty string')
    if not isinstance(words, list) or not all(
        isinstance(item, str) and len(item) > 0 for item in words ):
        raise ValueError('the words should be a list of non-empty strings')
    # Build trie
    trie: dict[str, Any] = {}
    word_keeper_key = 'WORD_KEEPER'
    for word in words:
        trie_node = trie
        for c in word:
            if c not in trie_node:
                trie_node[c] = {}
            trie_node = trie_node[c]
        trie_node[word_keeper_key] = True
    len_string = len(string)
    # Dynamic programming method
    @functools.cache
    def is_breakable(index: int) -> bool:
        if index == len_string:
            return True
        trie_node = trie
        for i in range(index, len_string):
            trie_node = trie_node.get(string[i], None)
            if trie_node is None:
                return False
            if trie_node.get(word_keeper_key, False) and is_breakable(i + 1):
                return True
        return False
    return is_breakable(0)
if __name__ == "__main__":
import doctest
doctest.testmod()
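# Usage sketch for the trie-based checker above (restored name `word_break`);
# the sample strings are illustrative only.
def demo_word_break() -> None:
    assert word_break('applepenapple', ['apple', 'pen'])
    assert not word_break('catsandog', ['cats', 'dog', 'sand', 'and', 'cat'])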
| 286 | from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {"configuration_dpt": ["DPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DPTConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_dpt"] = ["DPTFeatureExtractor"]
    _import_structure["image_processing_dpt"] = ["DPTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_dpt"] = [
"DPT_PRETRAINED_MODEL_ARCHIVE_LIST",
"DPTForDepthEstimation",
"DPTForSemanticSegmentation",
"DPTModel",
"DPTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_dpt import DPTFeatureExtractor
from .image_processing_dpt import DPTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dpt import (
DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
DPTForDepthEstimation,
DPTForSemanticSegmentation,
DPTModel,
DPTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 85 | 0 |
'''simple docstring'''
from __future__ import annotations
def check_polygon(nums: list[float]) -> bool:
    '''simple docstring'''
    if len(nums) < 2:
        raise ValueError("""Monogons and Digons are not polygons in the Euclidean space""" )
    if any(i <= 0 for i in nums ):
        raise ValueError("""All values must be greater than 0""" )
    copy_nums = nums.copy()
    copy_nums.sort()
    return copy_nums[-1] < sum(copy_nums[:-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
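# Quick examples for the polygon-inequality check above (restored name
# `check_polygon`): the longest side must be strictly shorter than the sum of
# the remaining sides.
def demo_check_polygon() -> None:
    assert check_polygon([6, 10, 5])         # valid triangle: 10 < 6 + 5
    assert not check_polygon([3, 7, 13, 2])  # invalid: 13 >= 3 + 7 + 2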
| 275 | from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
SCREAMING_SNAKE_CASE__ : List[Any] = logging.get_logger(__name__)
class snake_case ( UpperCamelCase_ ):
lowercase_ = ['pixel_values']
    def __init__( self , do_rescale: bool = True , rescale_factor: Union[int, float] = 1 / 255 , do_pad: bool = True , pad_size: int = 8 , **kwargs , )-> None:
        """simple docstring"""
        super().__init__(**kwargs )
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
        self.pad_size = pad_size
    def rescale( self , image: np.ndarray , scale: float , data_format: Optional[Union[str, ChannelDimension]] = None , **kwargs )-> np.ndarray:
        """simple docstring"""
        return rescale(image , scale=scale , data_format=data_format , **kwargs )
    def pad( self , image: np.ndarray , size: int , data_format: Optional[Union[str, ChannelDimension]] = None )-> np.ndarray:
        """simple docstring"""
        old_height , old_width = get_image_size(image )
        pad_height = (old_height // size + 1) * size - old_height
        pad_width = (old_width // size + 1) * size - old_width
        return pad(image , ((0, pad_height), (0, pad_width)) , mode='symmetric' , data_format=data_format )
    def preprocess( self , images: ImageInput , do_rescale: Optional[bool] = None , rescale_factor: Optional[float] = None , do_pad: Optional[bool] = None , pad_size: Optional[int] = None , return_tensors: Optional[Union[str, TensorType]] = None , data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST , **kwargs , )-> BatchFeature:
        """simple docstring"""
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_pad = do_pad if do_pad is not None else self.do_pad
        pad_size = pad_size if pad_size is not None else self.pad_size
        images = make_list_of_images(images )
        if not valid_images(images ):
            raise ValueError(
                'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
                'torch.Tensor, tf.Tensor or jax.ndarray.' )
        if do_rescale and rescale_factor is None:
            raise ValueError('Rescale factor must be specified if do_rescale is True.' )
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]
        if do_rescale:
            images = [self.rescale(image=image , scale=rescale_factor ) for image in images]
        if do_pad:
            images = [self.pad(image , size=pad_size ) for image in images]
        images = [to_channel_dimension_format(image , data_format ) for image in images]
        data = {'pixel_values': images}
        return BatchFeature(data=data , tensor_type=return_tensors )
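# Worked example of the symmetric-padding arithmetic in `pad` above — a
# minimal sketch, independent of the processor class:
def pad_amount(old: int, size: int = 8) -> int:
    # mirrors (old // size + 1) * size - old from the method above
    return (old // size + 1) * size - old

assert pad_amount(250) == 6  # 250 -> 256
assert pad_amount(500) == 4  # 500 -> 504
assert pad_amount(256) == 8  # already a multiple: still pads one full block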
| 85 | 0 |
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
lowercase : str = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
lowercase : Dict = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
lowercase : Union[str, Any] = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1000))
def evaluate(item: str, main_target: str) -> tuple[str, float]:
    score = len([g for position, g in enumerate(item) if g == main_target[position]] )
    return (item, float(score ))
def crossover(parent_a: str, parent_b: str) -> tuple[str, str]:
    random_slice = random.randint(0 , len(parent_a ) - 1 )
    child_a = parent_a[:random_slice] + parent_b[random_slice:]
    child_b = parent_b[:random_slice] + parent_a[random_slice:]
    return (child_a, child_b)
def mutate(child: str, genes: list[str]) -> str:
    child_list = list(child )
    if random.uniform(0 , 1 ) < MUTATION_PROBABILITY:
        child_list[random.randint(0 , len(child )) - 1] = random.choice(genes )
    return "".join(child_list )
def select(parent_a: tuple[str, float], population_score: list[tuple[str, float]], genes: list[str], ) -> list[str]:
    pop = []
    # Generate more children proportionally to the fitness score.
    child_n = int(parent_a[1] * 100 ) + 1
    child_n = 10 if child_n >= 10 else child_n
    for _ in range(child_n ):
        parent_b = population_score[random.randint(0 , N_SELECTED )][0]
        child_a , child_b = crossover(parent_a[0] , parent_b )
        # Append new string to the population list.
        pop.append(mutate(child_a , genes ) )
        pop.append(mutate(child_b , genes ) )
    return pop
def basic(target: str, genes: list[str], debug: bool = True) -> tuple[int, int, str]:
    if N_POPULATION < N_SELECTED:
        msg = f"{N_POPULATION} must be bigger than {N_SELECTED}"
        raise ValueError(msg )
    # Verify that the target contains no genes besides the ones inside genes variable.
    not_in_genes_list = sorted({c for c in target if c not in genes} )
    if not_in_genes_list:
        msg = f"{not_in_genes_list} is not in genes list, evolution cannot converge"
        raise ValueError(msg )
# Generate random starting population.
    population = []
    for _ in range(N_POPULATION ):
        population.append("""""".join([random.choice(genes ) for i in range(len(target ) )] ) )
    # Just some logs to know what the algorithms is doing.
    generation , total_population = 0, 0
# This loop will end when we find a perfect match for our target.
while True:
generation += 1
        total_population += len(population )
# Random population created. Now it's time to evaluate.
# Adding a bit of concurrency can make everything faster,
#
# import concurrent.futures
# population_score: list[tuple[str, float]] = []
# with concurrent.futures.ThreadPoolExecutor(
# max_workers=NUM_WORKERS) as executor:
# futures = {executor.submit(evaluate, item) for item in population}
# concurrent.futures.wait(futures)
# population_score = [item.result() for item in futures]
#
# but with a simple algorithm like this, it will probably be slower.
# We just need to call evaluate for every item inside the population.
        population_score = [evaluate(item , target ) for item in population]
# Check if there is a matching evolution.
        population_score = sorted(population_score , key=lambda x : x[1] , reverse=True )
if population_score[0][0] == target:
return (generation, total_population, population_score[0][0])
# Print the best result every 10 generation.
# Just to know that the algorithm is working.
if debug and generation % 10 == 0:
print(
f"\nGeneration: {generation}"
f"\nTotal Population:{total_population}"
f"\nBest score: {population_score[0][1]}"
f"\nBest string: {population_score[0][0]}" )
# Flush the old population, keeping some of the best evolutions.
# Keeping this avoid regression of evolution.
        population_best = population[: int(N_POPULATION / 3 )]
        population.clear()
        population.extend(population_best )
# Normalize population score to be between 0 and 1.
        population_score = [
            (item, score / len(target )) for item, score in population_score
        ]
# This is selection
        for i in range(N_SELECTED ):
            population.extend(select(population_score[int(i )] , population_score , genes ) )
# Check if the population has already reached the maximum value and if so,
# break the cycle. If this check is disabled, the algorithm will take
# forever to compute large strings, but will also calculate small strings in
# a far fewer generations.
        if len(population ) > N_POPULATION:
break
if __name__ == "__main__":
lowercase : Any = (
"This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"
)
lowercase : Union[str, Any] = list(
""" ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"""
"""nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"""
)
    generation , population , target = basic(target_str, genes_list)
print(
F'''\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}'''
)
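# A seeded one-round illustration of the operators above — a minimal sketch;
# the sample strings are ours, purely for demonstration.
def demo_ga_operators() -> None:
    random.seed(0)
    item, score = evaluate("abcd", "abcf")
    assert (item, score) == ("abcd", 3.0)  # three of four positions match
    child_a, child_b = crossover("aaaa", "bbbb")
    assert len(child_a) == 4 and len(child_b) == 4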
| 336 | from pathlib import Path
import numpy as np
from PIL import Image
def rgb_to_gray(rgb: np.ndarray) -> np.ndarray:
    '''simple docstring'''
    r , g , b = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2]
    return 0.2989 * r + 0.5870 * g + 0.1140 * b
def gray_to_binary(gray: np.ndarray) -> np.ndarray:
    '''simple docstring'''
    return (gray > 1_27) & (gray <= 2_55)
def dilation(image: np.ndarray, kernel: np.ndarray) -> np.ndarray:
    '''simple docstring'''
    output = np.zeros_like(image )
    image_padded = np.zeros(
        (image.shape[0] + kernel.shape[0] - 1, image.shape[1] + kernel.shape[1] - 1) )
    # Copy image to padded image
    image_padded[kernel.shape[0] - 2 : -1, kernel.shape[1] - 2 : -1] = image
    # Iterate over image & apply kernel
    for x in range(image.shape[1] ):
        for y in range(image.shape[0] ):
            summation = (
                kernel * image_padded[y : y + kernel.shape[0], x : x + kernel.shape[1]]
            ).sum()
            output[y, x] = int(summation > 0 )
    return output
if __name__ == "__main__":
    # read original image
    lena_path = Path(__file__).resolve().parent / "image_data" / "lena.jpg"
    lena = np.array(Image.open(lena_path))
    # kernel to be applied
    structuring_element = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
    output = dilation(gray_to_binary(rgb_to_gray(lena)), structuring_element)
    # Save the output image
    pil_img = Image.fromarray(output).convert("RGB")
    pil_img.save("result_dilation.png")
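# A tiny self-contained check of `dilation` on a 3x3 binary array — a minimal
# sketch: a single centre pixel grows into a plus shape under the cross-shaped
# structuring element.
def demo_dilation() -> None:
    img = np.zeros((3, 3), dtype=int)
    img[1, 1] = 1
    cross = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
    assert dilation(img, cross).sum() == 5  # centre plus its four neighbours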
| 85 | 0 |
"""simple docstring"""
fast27_timesteps = [
999,
800,
799,
600,
599,
500,
400,
399,
377,
355,
333,
311,
288,
266,
244,
222,
200,
199,
177,
155,
133,
111,
88,
66,
44,
22,
0,
]
smart27_timesteps = [
999,
976,
952,
928,
905,
882,
858,
857,
810,
762,
715,
714,
572,
429,
428,
286,
285,
238,
190,
143,
142,
118,
95,
71,
47,
24,
0,
]
smart50_timesteps = [
999,
988,
977,
966,
955,
944,
933,
922,
911,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
350,
300,
299,
266,
233,
200,
199,
179,
159,
140,
120,
100,
99,
88,
77,
66,
55,
44,
33,
22,
11,
0,
]
smart100_timesteps = [
999,
995,
992,
989,
985,
981,
978,
975,
971,
967,
964,
961,
957,
956,
951,
947,
942,
937,
933,
928,
923,
919,
914,
913,
908,
903,
897,
892,
887,
881,
876,
871,
870,
864,
858,
852,
846,
840,
834,
828,
827,
820,
813,
806,
799,
792,
785,
784,
777,
770,
763,
756,
749,
742,
741,
733,
724,
716,
707,
699,
698,
688,
677,
666,
656,
655,
645,
634,
623,
613,
612,
598,
584,
570,
569,
555,
541,
527,
526,
505,
484,
483,
462,
440,
439,
396,
395,
352,
351,
308,
307,
264,
263,
220,
219,
176,
132,
88,
44,
0,
]
smart185_timesteps = [
999,
997,
995,
992,
990,
988,
986,
984,
981,
979,
977,
975,
972,
970,
968,
966,
964,
961,
959,
957,
956,
954,
951,
949,
946,
944,
941,
939,
936,
934,
931,
929,
926,
924,
921,
919,
916,
914,
913,
910,
907,
905,
902,
899,
896,
893,
891,
888,
885,
882,
879,
877,
874,
871,
870,
867,
864,
861,
858,
855,
852,
849,
846,
843,
840,
837,
834,
831,
828,
827,
824,
821,
817,
814,
811,
808,
804,
801,
798,
795,
791,
788,
785,
784,
780,
777,
774,
770,
766,
763,
760,
756,
752,
749,
746,
742,
741,
737,
733,
730,
726,
722,
718,
714,
710,
707,
703,
699,
698,
694,
690,
685,
681,
677,
673,
669,
664,
660,
656,
655,
650,
646,
641,
636,
632,
627,
622,
618,
613,
612,
607,
602,
596,
591,
586,
580,
575,
570,
569,
563,
557,
551,
545,
539,
533,
527,
526,
519,
512,
505,
498,
491,
484,
483,
474,
466,
457,
449,
440,
439,
428,
418,
407,
396,
395,
381,
366,
352,
351,
330,
308,
307,
286,
264,
263,
242,
220,
219,
176,
175,
132,
131,
88,
44,
0,
]
super27_timesteps = [
999,
991,
982,
974,
966,
958,
950,
941,
933,
925,
916,
908,
900,
899,
874,
850,
825,
800,
799,
700,
600,
500,
400,
300,
200,
100,
0,
]
super40_timesteps = [
999,
992,
985,
978,
971,
964,
957,
949,
942,
935,
928,
921,
914,
907,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
300,
299,
200,
199,
100,
99,
0,
]
super100_timesteps = [
999,
996,
992,
989,
985,
982,
979,
975,
972,
968,
965,
961,
958,
955,
951,
948,
944,
941,
938,
934,
931,
927,
924,
920,
917,
914,
910,
907,
903,
900,
899,
891,
884,
876,
869,
861,
853,
846,
838,
830,
823,
815,
808,
800,
799,
788,
777,
766,
755,
744,
733,
722,
711,
700,
699,
688,
677,
666,
655,
644,
633,
622,
611,
600,
599,
585,
571,
557,
542,
528,
514,
500,
499,
485,
471,
457,
442,
428,
414,
400,
399,
379,
359,
340,
320,
300,
299,
279,
259,
240,
220,
200,
199,
166,
133,
100,
99,
66,
33,
0,
] | 505 | def solution(n: int = 60_08_51_47_51_43) -> int:
    '''simple docstring'''
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError('Parameter n must be int or castable to int.' )
    if n <= 0:
        raise ValueError('Parameter n must be greater than or equal to one.' )
    i = 2
    ans = 0
    if n == 2:
        return 2
    while n > 2:
        while n % i != 0:
            i += 1
        ans = i
        while n % i == 0:
            n = n // i
        i += 1
    return int(ans)
if __name__ == "__main__":
    print(F"""{solution() = }""")
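# Sanity check on Project Euler's worked example: 13195 = 5 * 7 * 13 * 29,
# so the largest prime factor is 29.
def demo_solution() -> None:
    assert solution(13_195) == 29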
| 85 | 0 |
import random
class Onepad:
    @staticmethod
    def encrypt( text: str ):
        plain = [ord(i ) for i in text]
        key = []
        cipher = []
        for i in plain:
            k = random.randint(1 , 3_00 )
            c = (i + k) * k
            cipher.append(c )
            key.append(k )
        return cipher, key
    @staticmethod
    def decrypt( cipher: list[int] , key: list[int] ):
        plain = []
        for i in range(len(key ) ):
            p = int((cipher[i] - (key[i]) ** 2) / key[i] )
            plain.append(chr(p ) )
        return "".join(plain )
if __name__ == "__main__":
    c, k = Onepad().encrypt("""Hello""")
print(c, k)
print(Onepad().decrypt(c, k))
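# Round-trip property of the cipher above: decrypt(encrypt(m)) == m, since
# c = (p + k) * k implies p = (c - k**2) / k. A minimal check:
def demo_onepad_roundtrip() -> None:
    cipher, key = Onepad().encrypt('attack at dawn')
    assert Onepad().decrypt(cipher, key) == 'attack at dawn'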
| 364 | def hamming(n_element: int) -> list:
    '''simple docstring'''
    n_element = int(n_element )
    if n_element < 1:
        my_error = ValueError('a should be a positive number' )
        raise my_error
    hamming_list = [1]
    (i, j, k) = (0, 0, 0)
    index = 1
    while index < n_element:
        while hamming_list[i] * 2 <= hamming_list[-1]:
            i += 1
        while hamming_list[j] * 3 <= hamming_list[-1]:
            j += 1
        while hamming_list[k] * 5 <= hamming_list[-1]:
            k += 1
        hamming_list.append(
            min(hamming_list[i] * 2 , hamming_list[j] * 3 , hamming_list[k] * 5 ) )
        index += 1
    return hamming_list
if __name__ == "__main__":
    n = input("Enter the last number (nth term) of the Hamming Number Series: ")
    print("Formula of Hamming Number Series => 2^i * 3^j * 5^k")
    hamming_numbers = hamming(int(n))
    print("-----------------------------------------------------")
    print(F"""The list with nth numbers is: {hamming_numbers}""")
print("-----------------------------------------------------")
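# For reference, the first ten Hamming (5-smooth) numbers produced by
# `hamming` above — every element factors only into 2s, 3s and 5s:
def demo_hamming() -> None:
    assert hamming(10) == [1, 2, 3, 4, 5, 6, 8, 9, 10, 12]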
| 85 | 0 |
'''simple docstring'''
import requests
giphy_api_key = "YOUR API KEY"
def get_gifs(query: str, api_key: str = giphy_api_key) -> list:
    formatted_query = '+'.join(query.split() )
    url = f'''https://api.giphy.com/v1/gifs/search?q={formatted_query}&api_key={api_key}'''
    gifs = requests.get(url ).json()['data']
    return [gif["url"] for gif in gifs]
if __name__ == "__main__":
print("""\n""".join(get_gifs("""space ship""")))
| 116 | from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_nllb_moe": [
        "NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "NllbMoeConfig",
    ]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_nllb_moe"] = [
"NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST",
"NllbMoeForConditionalGeneration",
"NllbMoeModel",
"NllbMoePreTrainedModel",
"NllbMoeTop2Router",
"NllbMoeSparseMLP",
]
if TYPE_CHECKING:
from .configuration_nllb_moe import (
NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
NllbMoeConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nllb_moe import (
NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
NllbMoeForConditionalGeneration,
NllbMoeModel,
NllbMoePreTrainedModel,
NllbMoeSparseMLP,
        NllbMoeTop2Router,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 85 | 0 |
# Function to print upper half of diamond (pyramid)
def floyd(n):
    for i in range(0 , n ):
        for _ in range(0 , n - i - 1 ):  # printing spaces
            print(''' ''' , end='''''' )
        for _ in range(0 , i + 1 ):  # printing stars
            print('''* ''' , end='''''' )
        print()
def reverse_floyd(n):
    for i in range(n , 0 , -1 ):
        for _ in range(i , 0 , -1 ):  # printing stars
            print('''* ''' , end='''''' )
        print()
        for _ in range(n - i + 1 , 0 , -1 ):  # printing spaces
            print(''' ''' , end='''''' )
def pretty_print(n):
    if n <= 0:
        print(''' ... .... nothing printing :(''' )
        return
    floyd(n )  # upper half
    reverse_floyd(n )  # lower half
if __name__ == "__main__":
print(R"""| /\ | |- | |- |--| |\ /| |-""")
print(R"""|/ \| |- |_ |_ |__| | \/ | |_""")
    K = 1
while K:
        user_number = int(input("""enter the number and , and see the magic : """))
print()
pretty_print(user_number)
        K = int(input("""press 0 to exit... and 1 to continue..."""))
print("""Good Bye...""")
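# For reference, pretty_print(3) above draws a star diamond roughly like:
#   *
#  * *
# * * *
# * * *
#  * *
#   *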
| 615 | from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
_import_structure = {
    "configuration_trocr": ["TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP", "TrOCRConfig"],
    "processing_trocr": ["TrOCRProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_trocr"] = [
"TROCR_PRETRAINED_MODEL_ARCHIVE_LIST",
"TrOCRForCausalLM",
"TrOCRPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 85 | 0 |
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
from typing import List, Tuple
from transformers import RegNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFRegNetForImageClassification, TFRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class lowercase_ :
'''simple docstring'''
def __init__( self : Tuple , _UpperCAmelCase : int , _UpperCAmelCase : Optional[int]=3 , _UpperCAmelCase : Tuple=32 , _UpperCAmelCase : Any=3 , _UpperCAmelCase : Tuple=10 , _UpperCAmelCase : Optional[int]=[10, 20, 30, 40] , _UpperCAmelCase : List[Any]=[1, 1, 2, 1] , _UpperCAmelCase : int=True , _UpperCAmelCase : Optional[Any]=True , _UpperCAmelCase : Any="relu" , _UpperCAmelCase : int=3 , _UpperCAmelCase : List[Any]=None , ):
_A = parent
_A = batch_size
_A = image_size
_A = num_channels
_A = embeddings_size
_A = hidden_sizes
_A = depths
_A = is_training
_A = use_labels
_A = hidden_act
_A = num_labels
_A = scope
_A = len(a_ )
def lowerCAmelCase_ ( self : Union[str, Any] ):
_A = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_A = None
if self.use_labels:
_A = ids_tensor([self.batch_size] , self.num_labels )
_A = self.get_config()
return config, pixel_values, labels
def lowerCAmelCase_ ( self : str ):
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , )
def lowerCAmelCase_ ( self : List[str] , _UpperCAmelCase : int , _UpperCAmelCase : Any , _UpperCAmelCase : Optional[Any] ):
_A = TFRegNetModel(config=a_ )
_A = model(a_ , training=a_ )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def lowerCAmelCase_ ( self : Union[str, Any] , _UpperCAmelCase : Dict , _UpperCAmelCase : int , _UpperCAmelCase : Optional[Any] ):
_A = self.num_labels
_A = TFRegNetForImageClassification(a_ )
_A = model(a_ , labels=a_ , training=a_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCAmelCase_ ( self : List[str] ):
_A = self.prepare_config_and_inputs()
_A = config_and_inputs
_A = {'pixel_values': pixel_values}
return config, inputs_dict
@require_tf
class lowercase_ ( UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase : int = (TFRegNetModel, TFRegNetForImageClassification) if is_tf_available() else ()
UpperCAmelCase : Dict = (
{'''feature-extraction''': TFRegNetModel, '''image-classification''': TFRegNetForImageClassification}
if is_tf_available()
else {}
)
UpperCAmelCase : str = False
UpperCAmelCase : Any = False
UpperCAmelCase : List[Any] = False
UpperCAmelCase : Optional[Any] = False
UpperCAmelCase : str = False
def lowerCAmelCase_ ( self : int ):
_A = TFRegNetModelTester(self )
_A = ConfigTester(self , config_class=a_ , has_text_modality=a_ )
def lowerCAmelCase_ ( self : List[Any] ):
return
@unittest.skip(reason='RegNet does not use inputs_embeds' )
def lowerCAmelCase_ ( self : str ):
pass
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices('GPU' ) ) == 0 , reason='TF does not support backprop for grouped convolutions on CPU.' , )
@slow
def lowerCAmelCase_ ( self : Any ):
super().test_keras_fit()
@unittest.skip(reason='RegNet does not support input and output embeddings' )
def lowerCAmelCase_ ( self : Any ):
pass
def lowerCAmelCase_ ( self : Tuple ):
_A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_A = model_class(a_ )
_A = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_A = [*signature.parameters.keys()]
_A = ['pixel_values']
self.assertListEqual(arg_names[:1] , a_ )
def lowerCAmelCase_ ( self : str ):
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a_ )
def lowerCAmelCase_ ( self : List[Any] ):
def check_hidden_states_output(_UpperCAmelCase : int , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Tuple ):
_A = model_class(a_ )
_A = model(**self._prepare_for_class(a_ , a_ ) , training=a_ )
_A = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_A = self.model_tester.num_stages
self.assertEqual(len(a_ ) , expected_num_stages + 1 )
# RegNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , )
_A = self.model_tester.prepare_config_and_inputs_for_common()
_A = ['basic', 'bottleneck']
for model_class in self.all_model_classes:
for layer_type in layers_type:
_A = layer_type
_A = True
check_hidden_states_output(a_ , a_ , a_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_A = True
check_hidden_states_output(a_ , a_ , a_ )
def lowerCAmelCase_ ( self : Optional[int] ):
_A = self.model_tester.prepare_config_and_inputs_for_common()
def check_equivalence(_UpperCAmelCase : str , _UpperCAmelCase : Tuple , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Union[str, Any]={} ):
_A = model(a_ , return_dict=a_ , **a_ )
_A = model(a_ , return_dict=a_ , **a_ ).to_tuple()
def recursive_check(_UpperCAmelCase : List[Any] , _UpperCAmelCase : int ):
if isinstance(a_ , (List, Tuple) ):
for tuple_iterable_value, dict_iterable_value in zip(a_ , a_ ):
recursive_check(a_ , a_ )
elif tuple_object is None:
return
else:
self.assertTrue(
all(tf.equal(a_ , a_ ) ) , msg=(
'Tuple and dict output are not equal. Difference:'
F''' {tf.math.reduce_max(tf.abs(tuple_object - dict_object ) )}'''
) , )
recursive_check(a_ , a_ )
for model_class in self.all_model_classes:
_A = model_class(a_ )
_A = self._prepare_for_class(a_ , a_ )
_A = self._prepare_for_class(a_ , a_ )
check_equivalence(a_ , a_ , a_ )
_A = self._prepare_for_class(a_ , a_ , return_labels=a_ )
_A = self._prepare_for_class(a_ , a_ , return_labels=a_ )
check_equivalence(a_ , a_ , a_ )
_A = self._prepare_for_class(a_ , a_ )
_A = self._prepare_for_class(a_ , a_ )
check_equivalence(a_ , a_ , a_ , {'output_hidden_states': True} )
_A = self._prepare_for_class(a_ , a_ , return_labels=a_ )
_A = self._prepare_for_class(a_ , a_ , return_labels=a_ )
check_equivalence(a_ , a_ , a_ , {'output_hidden_states': True} )
def lowerCAmelCase_ ( self : str ):
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*a_ )
@slow
def lowerCAmelCase_ ( self : Any ):
for model_name in TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_A = TFRegNetModel.from_pretrained(a_ )
self.assertIsNotNone(a_ )
def _snake_case ( ) -> Optional[int]:
'''simple docstring'''
_A = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_tf
@require_vision
class lowercase_ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def lowerCAmelCase_ ( self : List[Any] ):
return (
AutoImageProcessor.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def lowerCAmelCase_ ( self : Any ):
_A = TFRegNetForImageClassification.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
_A = self.default_image_processor
_A = prepare_img()
_A = image_processor(images=a_ , return_tensors='tf' )
# forward pass
_A = model(**a_ , training=a_ )
# verify the logits
_A = tf.TensorShape((1, 1_000) )
self.assertEqual(outputs.logits.shape , a_ )
_A = tf.constant([-0.4180, -1.5051, -3.4836] )
tf.debugging.assert_near(outputs.logits[0, :3] , a_ , atol=1E-4 )
| 7 | import numpy as np
from cva import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uinta
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
img = imread(r"digital_image_processing/image_data/lena_small.jpg")
gray = cvtColor(img, COLOR_BGR2GRAY)
def _a ( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Any = cn.convert_to_negative(lowercase__ )
# assert negative_img array for at least one True
assert negative_img.any()
def _a ( ):
'''simple docstring'''
with Image.open('digital_image_processing/image_data/lena_small.jpg' ) as img:
# Work around assertion for response
assert str(cc.change_contrast(lowercase__ , 1_10 ) ).startswith(
'<PIL.Image.Image image mode=RGB size=100x100 at' )
def _a ( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : str = canny.gen_gaussian_kernel(9 , sigma=1.4 )
# Assert ambiguous array
assert resp.all()
def _a ( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[int] = imread('digital_image_processing/image_data/lena_small.jpg' , 0 )
# assert ambiguous array for all == True
assert canny_img.all()
SCREAMING_SNAKE_CASE__ : List[str] = canny.canny(lowercase__ )
# assert canny array for at least one True
assert canny_array.any()
def _a ( ):
'''simple docstring'''
assert gg.gaussian_filter(lowercase__ , 5 , sigma=0.9 ).all()
def _a ( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Any = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]] )
SCREAMING_SNAKE_CASE__ : Tuple = conv.img_convolve(lowercase__ , lowercase__ ).astype(lowercase__ )
assert res.any()
def _a ( ):
'''simple docstring'''
assert med.median_filter(lowercase__ , 3 ).any()
def _a ( ):
'''simple docstring'''
    grad , theta = sob.sobel_filter(gray )
    assert grad.any() and theta.any()
def _a ( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[str] = sp.make_sepia(lowercase__ , 20 )
assert sepia.all()
def _a ( lowercase__ : str = "digital_image_processing/image_data/lena_small.jpg" ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : str = bs.Burkes(imread(lowercase__ , 1 ) , 1_20 )
burkes.process()
assert burkes.output_img.any()
def _a ( lowercase__ : str = "digital_image_processing/image_data/lena_small.jpg" , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[Any] = rs.NearestNeighbour(imread(lowercase__ , 1 ) , 4_00 , 2_00 )
nn.process()
assert nn.output.any()
def _a ( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Dict = 'digital_image_processing/image_data/lena.jpg'
# Reading the image and converting it to grayscale.
SCREAMING_SNAKE_CASE__ : Dict = imread(lowercase__ , 0 )
# Test for get_neighbors_pixel function() return not None
SCREAMING_SNAKE_CASE__ : str = 0
SCREAMING_SNAKE_CASE__ : Dict = 0
SCREAMING_SNAKE_CASE__ : Any = image[x_coordinate][y_coordinate]
SCREAMING_SNAKE_CASE__ : List[Any] = lbp.get_neighbors_pixel(
lowercase__ , lowercase__ , lowercase__ , lowercase__ )
assert neighbors_pixels is not None
# Test for local_binary_pattern function()
# Create a numpy array as the same height and width of read image
SCREAMING_SNAKE_CASE__ : Optional[Any] = np.zeros((image.shape[0], image.shape[1]) )
# Iterating through the image and calculating the local binary pattern value
# for each pixel.
for i in range(0 , image.shape[0] ):
for j in range(0 , image.shape[1] ):
SCREAMING_SNAKE_CASE__ : str = lbp.local_binary_value(lowercase__ , lowercase__ , lowercase__ )
assert lbp_image.any()
| 85 | 0 |
from typing import List, Optional, Union
import numpy as np
from ....audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ....feature_extraction_sequence_utils import SequenceFeatureExtractor
from ....feature_extraction_utils import BatchFeature
from ....file_utils import PaddingStrategy, TensorType
from ....utils import logging
_lowerCamelCase : List[Any] = logging.get_logger(__name__)
class lowerCamelCase (UpperCamelCase_ ):
"""simple docstring"""
UpperCAmelCase_ = ["input_features", "attention_mask"]
def __init__( self : List[Any], _UpperCAmelCase : List[Any]=8_0, _UpperCAmelCase : Tuple=1_6_0_0_0, _UpperCAmelCase : Tuple=0.0, _UpperCAmelCase : List[Any]=1_0, _UpperCAmelCase : int=2_5, _UpperCAmelCase : Any="hamming_window", _UpperCAmelCase : Any=3_2_7_6_8.0, _UpperCAmelCase : Any=0.97, _UpperCAmelCase : List[str]=1.0, _UpperCAmelCase : List[Any]=True, _UpperCAmelCase : Optional[Any]=True, _UpperCAmelCase : str=False, **_UpperCAmelCase : Optional[int], ) -> int:
"""simple docstring"""
super().__init__(feature_size=a_, sampling_rate=a_, padding_value=a_, **a_ )
SCREAMING_SNAKE_CASE__ : Any = feature_size
SCREAMING_SNAKE_CASE__ : Tuple = sampling_rate
SCREAMING_SNAKE_CASE__ : List[Any] = padding_value
SCREAMING_SNAKE_CASE__ : Optional[Any] = hop_length
SCREAMING_SNAKE_CASE__ : List[Any] = win_length
SCREAMING_SNAKE_CASE__ : List[str] = frame_signal_scale
SCREAMING_SNAKE_CASE__ : Tuple = preemphasis_coeff
SCREAMING_SNAKE_CASE__ : int = mel_floor
SCREAMING_SNAKE_CASE__ : List[str] = normalize_means
SCREAMING_SNAKE_CASE__ : Any = normalize_vars
SCREAMING_SNAKE_CASE__ : Tuple = win_function
SCREAMING_SNAKE_CASE__ : Tuple = return_attention_mask
SCREAMING_SNAKE_CASE__ : Any = win_length * sampling_rate // 1_0_0_0
SCREAMING_SNAKE_CASE__ : Tuple = hop_length * sampling_rate // 1_0_0_0
SCREAMING_SNAKE_CASE__ : Dict = optimal_fft_length(self.sample_size )
SCREAMING_SNAKE_CASE__ : Dict = (self.n_fft // 2) + 1
def A_ ( self : Optional[Any], _UpperCAmelCase : np.array ) -> np.ndarray:
"""simple docstring"""
if self.win_function == "hamming_window":
SCREAMING_SNAKE_CASE__ : Tuple = window_function(window_length=self.sample_size, name=self.win_function, periodic=a_ )
else:
SCREAMING_SNAKE_CASE__ : Any = window_function(window_length=self.sample_size, name=self.win_function )
SCREAMING_SNAKE_CASE__ : Any = mel_filter_bank(
num_frequency_bins=self.n_freqs, num_mel_filters=self.feature_size, min_frequency=0.0, max_frequency=self.sampling_rate / 2.0, sampling_rate=self.sampling_rate, )
SCREAMING_SNAKE_CASE__ : List[str] = spectrogram(
one_waveform * self.frame_signal_scale, window=a_, frame_length=self.sample_size, hop_length=self.sample_stride, fft_length=self.n_fft, center=a_, preemphasis=self.preemphasis_coeff, mel_filters=a_, mel_floor=self.mel_floor, log_mel="log", )
return msfc_features.T
def A_ ( self : int, _UpperCAmelCase : Dict, _UpperCAmelCase : List[str], _UpperCAmelCase : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
# make sure we normalize float32 arrays
if self.normalize_means:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = x[:input_length].mean(axis=0 )
SCREAMING_SNAKE_CASE__ : int = np.subtract(a_, a_ )
if self.normalize_vars:
SCREAMING_SNAKE_CASE__ : Dict = x[:input_length].std(axis=0 )
SCREAMING_SNAKE_CASE__ : Optional[Any] = np.divide(a_, a_ )
if input_length < x.shape[0]:
SCREAMING_SNAKE_CASE__ : Any = padding_value
# make sure array is in float32
SCREAMING_SNAKE_CASE__ : Any = x.astype(np.floataa )
return x
def A_ ( self : Tuple, _UpperCAmelCase : List[np.ndarray], _UpperCAmelCase : Optional[np.ndarray] = None ) -> List[np.ndarray]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features]
return [self._normalize_one(a_, a_, self.padding_value ) for x, n in zip(a_, a_ )]
def __call__( self : int, _UpperCAmelCase : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]], _UpperCAmelCase : Union[bool, str, PaddingStrategy] = False, _UpperCAmelCase : Optional[int] = None, _UpperCAmelCase : bool = False, _UpperCAmelCase : Optional[int] = None, _UpperCAmelCase : Optional[bool] = None, _UpperCAmelCase : Optional[Union[str, TensorType]] = None, _UpperCAmelCase : Optional[int] = None, **_UpperCAmelCase : str, ) -> BatchFeature:
"""simple docstring"""
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of'''
F''' {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with'''
F''' {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
"It is strongly recommended to pass the ``sampling_rate`` argument to this function. "
"Failing to do so can result in silent errors that might be hard to debug." )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = isinstance(a_, np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(F'''Only mono-channel audio is supported for input to {self}''' )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = is_batched_numpy or (
isinstance(a_, (list, tuple) ) and (isinstance(raw_speech[0], (np.ndarray, tuple, list) ))
)
if is_batched:
SCREAMING_SNAKE_CASE__ : Dict = [np.asarray(a_, dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(a_, np.ndarray ):
SCREAMING_SNAKE_CASE__ : Tuple = np.asarray(a_, dtype=np.floataa )
elif isinstance(a_, np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
SCREAMING_SNAKE_CASE__ : Tuple = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [raw_speech]
# extract fbank features
SCREAMING_SNAKE_CASE__ : str = [self._extract_mfsc_features(a_ ) for one_waveform in raw_speech]
# convert into correct format for padding
SCREAMING_SNAKE_CASE__ : Tuple = BatchFeature({"input_features": features} )
SCREAMING_SNAKE_CASE__ : List[Any] = self.pad(
a_, padding=a_, max_length=a_, truncation=a_, pad_to_multiple_of=a_, return_attention_mask=a_, **a_, )
# make sure list is in array format
SCREAMING_SNAKE_CASE__ : Optional[int] = padded_inputs.get("input_features" )
if isinstance(input_features[0], a_ ):
SCREAMING_SNAKE_CASE__ : List[str] = [np.asarray(a_, dtype=np.floataa ) for feature in input_features]
SCREAMING_SNAKE_CASE__ : int = padded_inputs.get("attention_mask" )
if attention_mask is not None:
SCREAMING_SNAKE_CASE__ : List[str] = [np.asarray(a_, dtype=np.intaa ) for array in attention_mask]
if self.normalize_means or self.normalize_vars:
SCREAMING_SNAKE_CASE__ : int = (
np.array(a_, dtype=np.intaa )
if self._get_padding_strategies(a_, max_length=a_ ) is not PaddingStrategy.DO_NOT_PAD
and padding
else None
)
SCREAMING_SNAKE_CASE__ : Any = self.normalize(
padded_inputs["input_features"], attention_mask=a_ )
if return_tensors is not None:
SCREAMING_SNAKE_CASE__ : Optional[Any] = padded_inputs.convert_to_tensors(a_ )
return padded_inputs
| 663 | import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
filename = get_tests_dir() + "/test_data/fsmt/fsmt_val_data.json"
with io.open(filename, "r", encoding="utf-8") as f:
    bleu_data = json.load(f)
@require_torch
class snake_case ( unittest.TestCase ):
    def get_tokenizer( self , mname )-> FSMTTokenizer:
        """simple docstring"""
        return FSMTTokenizer.from_pretrained(mname )
    def get_model( self , mname ):
        """simple docstring"""
        model = FSMTForConditionalGeneration.from_pretrained(mname ).to(torch_device )
        if torch_device == "cuda":
            model.half()
        return model
@parameterized.expand(
[
['en-ru', 26.0],
['ru-en', 22.0],
['en-de', 22.0],
['de-en', 29.0],
] )
@slow
    def test_bleu_scores( self , pair , min_bleu_score )-> None:
        """simple docstring"""
        # note: this test is not testing the best performance since it only evals a small batch
        # but it should be enough to detect a regression in the output quality
        mname = F'''facebook/wmt19-{pair}'''
        tokenizer = self.get_tokenizer(mname )
        model = self.get_model(mname )
        src_sentences = bleu_data[pair]['src']
        tgt_sentences = bleu_data[pair]['tgt']
        batch = tokenizer(src_sentences , return_tensors='pt' , truncation=True , padding='longest' ).to(torch_device )
        outputs = model.generate(
            input_ids=batch.input_ids , num_beams=8 , )
        decoded_sentences = tokenizer.batch_decode(
            outputs , skip_special_tokens=True , clean_up_tokenization_spaces=False )
        scores = calculate_bleu(decoded_sentences , tgt_sentences )
        print(scores )
        self.assertGreaterEqual(scores['bleu'] , min_bleu_score )
| 85 | 0 |
"""simple docstring"""
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
| 549 | import os
import pytest
from attr import dataclass
SCREAMING_SNAKE_CASE__ : int = "us-east-1" # defaults region
@dataclass
class SageMakerTestEnvironment:
    framework: str
    role = 'arn:aws:iam::558105141721:role/sagemaker_execution_role'
    hyperparameters = {
'task_name': 'mnli',
'per_device_train_batch_size': 16,
'per_device_eval_batch_size': 16,
'do_train': True,
'do_eval': True,
'do_predict': True,
'output_dir': '/opt/ml/model',
'overwrite_output_dir': True,
'max_steps': 500,
'save_steps': 5_500,
}
    distributed_hyperparameters = {**hyperparameters, 'max_steps': 1_000}
@property
    def metric_definitions( self )-> str:
"""simple docstring"""
if self.framework == "pytorch":
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"},
{"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"},
]
else:
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"},
{"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"},
]
@property
def __lowercase( self : Union[str, Any] )-> str:
"""simple docstring"""
return F'''{self.framework}-transfromers-test'''
@property
def __lowercase( self : int )-> str:
"""simple docstring"""
return F'''./tests/sagemaker/scripts/{self.framework}'''
@property
def __lowercase( self : Tuple )-> str:
"""simple docstring"""
if self.framework == "pytorch":
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04"
else:
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04"
@pytest.fixture(scope='class' )
def sm_env(request ):
    '''simple docstring'''
    request.cls.env = SageMakerTestEnvironment(framework=request.cls.framework )
| 85 | 0 |
"""simple docstring"""
from __future__ import annotations
def longest_subsequence(array: list[int]) -> list[int]:  # This function is recursive
    """
    >>> longest_subsequence([10, 22, 9, 33, 21, 50, 41, 60, 80])
    [10, 22, 33, 41, 60, 80]
    >>> longest_subsequence([1, 1, 1])
    [1, 1, 1]
    """
    array_length = len(array)
    # If the array contains only one element, we return it (it's the stop condition of
    # recursion)
    if array_length <= 1:
        return array
    # Else
    pivot = array[0]
    is_found = False
    i = 1
    longest_subseq: list[int] = []
    while not is_found and i < array_length:
        if array[i] < pivot:
            is_found = True
            temp_array = [element for element in array[i:] if element >= array[i]]
            temp_array = longest_subsequence(temp_array)
            if len(temp_array) > len(longest_subseq):
                longest_subseq = temp_array
        else:
            i += 1
    temp_array = [element for element in array[1:] if element >= pivot]
    temp_array = [pivot, *longest_subsequence(temp_array)]
    if len(temp_array) > len(longest_subseq):
        return temp_array
    else:
        return longest_subseq
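# For long inputs, an O(n log n) patience-sorting variant is a common alternative
# to the recursive approach above. This sketch (not part of the original module)
# returns only the length of the longest non-decreasing subsequence, matching the
# `>=` comparisons used above:
from bisect import bisect_right

def longest_subsequence_length(array: list[int]) -> int:
    tails: list[int] = []
    for element in array:
        pos = bisect_right(tails, element)
        if pos == len(tails):
            tails.append(element)  # extends the best subsequence found so far
        else:
            tails[pos] = element   # keeps tails minimal for future extensions
    return len(tails)  # length only; recovering the sequence needs back-pointers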
if __name__ == "__main__":
import doctest
doctest.testmod()
| 498 |
import os
import unittest
from transformers import FunnelTokenizer, FunnelTokenizerFast
from transformers.models.funnel.tokenization_funnel import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class FunnelTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = FunnelTokenizer
    rust_tokenizer_class = FunnelTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
    def setUp(self):
        """simple docstring"""
        super().setUp()
        vocab_tokens = [
'<unk>',
'<cls>',
'<sep>',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
    def get_tokenizer(self, **kwargs):
        """simple docstring"""
        return FunnelTokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def get_rust_tokenizer(self, **kwargs):
        """simple docstring"""
        return FunnelTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        """simple docstring"""
        input_text = 'UNwant\u00E9d,running'
        output_text = 'unwanted, running'
return input_text, output_text
    def test_full_tokenizer(self):
        """simple docstring"""
        tokenizer = self.tokenizer_class(self.vocab_file)
        tokens = tokenizer.tokenize('UNwant\u00E9d,running')
        self.assertListEqual(tokens, ['un', '##want', '##ed', ',', 'runn', '##ing'])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])
    def test_token_type_ids(self):
        """simple docstring"""
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            inputs = tokenizer('UNwant\u00E9d,running')
            sentence_len = len(inputs['input_ids']) - 1
            self.assertListEqual(inputs['token_type_ids'], [2] + [0] * sentence_len)
            inputs = tokenizer('UNwant\u00E9d,running', 'UNwant\u00E9d,running')
self.assertListEqual(inputs['token_type_ids'] , [2] + [0] * sentence_len + [1] * sentence_len )
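    # What the test above checks, concretely: Funnel marks the [CLS] position with
    # token type 2 (unlike BERT's 0) and then labels the two segments 0 and 1, e.g.
    # (illustrative only; exact ids depend on the vocabulary):
    #   tok = FunnelTokenizer.from_pretrained("funnel-transformer/small")
    #   tok("hello", "world")["token_type_ids"]  # -> [2, 0, ..., 0, 1, ..., 1]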
| 85 | 0 |
'''simple docstring'''
from collections import Counter
from pathlib import Path
from typing import Optional, Tuple
import yaml
class _NoDuplicateSafeLoader(yaml.SafeLoader):
    def _check_no_duplicates_on_constructed_node(self, node):
        keys = [self.constructed_objects[key_node] for key_node, _ in node.value]
        keys = [tuple(key) if isinstance(key, list) else key for key in keys]
        counter = Counter(keys)
        duplicate_keys = [key for key in counter if counter[key] > 1]
        if duplicate_keys:
            raise TypeError(f'''Got duplicate yaml keys: {duplicate_keys}''')

    def construct_mapping(self, node, deep=False):
        mapping = super().construct_mapping(node, deep=deep)
        self._check_no_duplicates_on_constructed_node(node)
        return mapping


def _split_yaml_from_readme(readme_content: str) -> Tuple[Optional[str], str]:
    full_content = list(readme_content.splitlines())
    if full_content and full_content[0] == "---" and "---" in full_content[1:]:
        sep_idx = full_content[1:].index('---') + 1
        yamlblock = '\n'.join(full_content[1:sep_idx])
        return yamlblock, "\n".join(full_content[sep_idx + 1 :])
    return None, "\n".join(full_content)


class DatasetMetadata(dict):
    # class attributes
    _FIELDS_WITH_DASHES = {'train_eval_index'}  # train-eval-index in the YAML metadata

    @classmethod
    def from_readme(cls, path: Path) -> "DatasetMetadata":
        '''simple docstring'''
        with open(path, encoding='utf-8') as readme_file:
            yaml_string, _ = _split_yaml_from_readme(readme_file.read())
        if yaml_string is not None:
            return cls.from_yaml_string(yaml_string)
        else:
            return cls()

    def to_readme(self, path: Path):
        '''simple docstring'''
        if path.exists():
            with open(path, encoding='utf-8') as readme_file:
                readme_content = readme_file.read()
        else:
            readme_content = None
        updated_readme_content = self._to_readme(readme_content)
        with open(path, 'w', encoding='utf-8') as readme_file:
            readme_file.write(updated_readme_content)

    def _to_readme(self, readme_content: Optional[str] = None) -> str:
        '''simple docstring'''
        if readme_content is not None:
            _, content = _split_yaml_from_readme(readme_content)
            full_content = '---\n' + self.to_yaml_string() + '---\n' + content
        else:
            full_content = '---\n' + self.to_yaml_string() + '---\n'
        return full_content

    @classmethod
    def from_yaml_string(cls, string: str) -> "DatasetMetadata":
        '''simple docstring'''
        metadata_dict = yaml.load(string, Loader=_NoDuplicateSafeLoader) or {}
        # Convert the YAML keys to DatasetMetadata fields
        metadata_dict = {
            (key.replace('-', '_') if key.replace('-', '_') in cls._FIELDS_WITH_DASHES else key): value
            for key, value in metadata_dict.items()
        }
        return cls(**metadata_dict)

    def to_yaml_string(self) -> str:
        '''simple docstring'''
        return yaml.safe_dump(
            {
                (key.replace('_', '-') if key in self._FIELDS_WITH_DASHES else key): value
                for key, value in self.items()
            },
            sort_keys=False,
            allow_unicode=True,
            encoding='utf-8',
        ).decode('utf-8')
A__ : Optional[int] = {
"image-classification": [],
"translation": [],
"image-segmentation": [],
"fill-mask": [],
"automatic-speech-recognition": [],
"token-classification": [],
"sentence-similarity": [],
"audio-classification": [],
"question-answering": [],
"summarization": [],
"zero-shot-classification": [],
"table-to-text": [],
"feature-extraction": [],
"other": [],
"multiple-choice": [],
"text-classification": [],
"text-to-image": [],
"text2text-generation": [],
"zero-shot-image-classification": [],
"tabular-classification": [],
"tabular-regression": [],
"image-to-image": [],
"tabular-to-text": [],
"unconditional-image-generation": [],
"text-retrieval": [],
"text-to-speech": [],
"object-detection": [],
"audio-to-audio": [],
"text-generation": [],
"conversational": [],
"table-question-answering": [],
"visual-question-answering": [],
"image-to-text": [],
"reinforcement-learning": [],
"voice-activity-detection": [],
"time-series-forecasting": [],
"document-question-answering": [],
}
if __name__ == "__main__":
from argparse import ArgumentParser
    ap = ArgumentParser(usage='Validate the yaml metadata block of a README.md file.')
    ap.add_argument('readme_filepath')
    args = ap.parse_args()
    readme_filepath = Path(args.readme_filepath)
    dataset_metadata = DatasetMetadata.from_readme(readme_filepath)
print(dataset_metadata)
dataset_metadata.to_readme(readme_filepath)
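# A worked example of the front-matter split performed by _split_yaml_from_readme
# above (values chosen for illustration): the first "---" fence pair is peeled
# off and the YAML block is returned separately from the markdown body.
#   >>> _split_yaml_from_readme("---\nlanguage:\n- en\n---\n# My dataset")
#   ('language:\n- en', '# My dataset')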
| 286 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
LEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"facebook/levit-128S": "https://huggingface.co/facebook/levit-128S/resolve/main/config.json",
# See all LeViT models at https://huggingface.co/models?filter=levit
}
class LevitConfig(PretrainedConfig):
    model_type = 'levit'
    def __init__( self , image_size=224 , num_channels=3 , kernel_size=3 , stride=2 , padding=1 , patch_size=16 , hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 8, 12] , depths=[4, 4, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , mlp_ratio=[2, 2, 2] , attention_ratio=[2, 2, 2] , initializer_range=0.02 , **kwargs , ):
        """simple docstring"""
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.hidden_sizes = hidden_sizes
        self.num_attention_heads = num_attention_heads
        self.depths = depths
        self.key_dim = key_dim
        self.drop_path_rate = drop_path_rate
        self.patch_size = patch_size
        self.attention_ratio = attention_ratio
        self.mlp_ratio = mlp_ratio
        self.initializer_range = initializer_range
        self.down_ops = [
            ['Subsample', key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
            ['Subsample', key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
        ]
class LevitOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse('1.11')
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
    @property
    def atol_for_validation(self) -> float:
"""simple docstring"""
return 1e-4
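# Usage sketch: what the ONNX config above exposes to the exporter, built
# directly from the two classes defined in this module (outputs illustrative):
#   >>> onnx_config = LevitOnnxConfig(LevitConfig())
#   >>> onnx_config.inputs
#   OrderedDict([('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'})])
#   >>> onnx_config.atol_for_validation
#   0.0001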
| 85 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
VISUAL_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'uclanlp/visualbert-vqa': 'https://huggingface.co/uclanlp/visualbert-vqa/resolve/main/config.json',
'uclanlp/visualbert-vqa-pre': 'https://huggingface.co/uclanlp/visualbert-vqa-pre/resolve/main/config.json',
'uclanlp/visualbert-vqa-coco-pre': (
'https://huggingface.co/uclanlp/visualbert-vqa-coco-pre/resolve/main/config.json'
),
'uclanlp/visualbert-vcr': 'https://huggingface.co/uclanlp/visualbert-vcr/resolve/main/config.json',
'uclanlp/visualbert-vcr-pre': 'https://huggingface.co/uclanlp/visualbert-vcr-pre/resolve/main/config.json',
'uclanlp/visualbert-vcr-coco-pre': (
'https://huggingface.co/uclanlp/visualbert-vcr-coco-pre/resolve/main/config.json'
),
'uclanlp/visualbert-nlvr2': 'https://huggingface.co/uclanlp/visualbert-nlvr2/resolve/main/config.json',
'uclanlp/visualbert-nlvr2-pre': 'https://huggingface.co/uclanlp/visualbert-nlvr2-pre/resolve/main/config.json',
'uclanlp/visualbert-nlvr2-coco-pre': (
'https://huggingface.co/uclanlp/visualbert-nlvr2-coco-pre/resolve/main/config.json'
)
# See all VisualBERT models at https://huggingface.co/models?filter=visual_bert
}
class VisualBertConfig(PretrainedConfig):
    """simple docstring"""
    model_type = 'visual_bert'
    def __init__( self , vocab_size=30522 , hidden_size=768 , visual_embedding_dim=512 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1E-12 , bypass_transformer=False , special_visual_initialize=True , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.visual_embedding_dim = visual_embedding_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.bypass_transformer = bypass_transformer
        self.special_visual_initialize = special_visual_initialize
| 86 |
# Usage:
# ./gen-card-facebook-wmt19.py
import os
from pathlib import Path
def write_model_card(model_card_dir, src_lang, tgt_lang):
    """simple docstring"""
    texts = {
"en": "Machine learning is great, isn't it?",
"ru": "Машинное обучение - это здорово, не так ли?",
"de": "Maschinelles Lernen ist großartig, oder?",
}
    # BLEU scores as follows:
    # "pair": [fairseq, transformers]
    scores = {
"ru-en": ["[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)", "39.20"],
"en-ru": ["[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)", "33.47"],
"en-de": ["[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)", "42.83"],
"de-en": ["[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)", "41.35"],
}
    pair = f'''{src_lang}-{tgt_lang}'''
    readme = f'''
---
language:
- {src_lang}
- {tgt_lang}
thumbnail:
tags:
- translation
- wmt19
- facebook
license: apache-2.0
datasets:
- wmt19
metrics:
- bleu
---
# FSMT
## Model description
This is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.
For more details, please see, [Facebook FAIR\'s WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).
The abbreviation FSMT stands for FairSeqMachineTranslation
All four models are available:
* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)
* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)
* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)
* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)
## Intended uses & limitations
#### How to use
```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = "facebook/wmt19-{src_lang}-{tgt_lang}"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)
input = "{texts[src_lang]}"
input_ids = tokenizer.encode(input, return_tensors="pt")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded) # {texts[tgt_lang]}
```
#### Limitations and bias
- The original (and this ported model) doesn\'t seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)
## Training data
Pretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).
## Eval results
pair | fairseq | transformers
-------|---------|----------
{pair} | {scores[pair][0]} | {scores[pair][1]}
The score is slightly below the score reported by `fairseq`, since `transformers` currently doesn\'t support:
- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).
- re-ranking
The score was calculated using this code:
```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR={pair}
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=15
mkdir -p $DATA_DIR
sacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```
note: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.
## Data Sources
- [training, etc.](http://www.statmt.org/wmt19/)
- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)
### BibTeX entry and citation info
```bibtex
@inproceedings{{...,
year={{2020}},
title={{Facebook FAIR\'s WMT19 News Translation Task Submission}},
author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},
booktitle={{Proc. of WMT}},
}}
```
## TODO
- port model ensemble (fairseq uses 4 model checkpoints)
'''
    os.makedirs(model_card_dir, exist_ok=True)
    path = os.path.join(model_card_dir, "README.md")
    print(f'''Generating {path}''')
    with open(path, "w", encoding="utf-8") as f:
        f.write(readme)
# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / 'model_cards'
for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    base, src_lang, tgt_lang = model_name.split('-')
    model_card_dir = model_cards_dir / 'facebook' / model_name
    write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
| 86 | 1 |
import argparse
import torch
# Step 1. clone https://github.com/microsoft/unilm
# Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd
# Step 3. cd unilm
# Step 4. ln -s $(realpath wavlm/modules.py) ./ # create simlink
# import classes
from unilm.wavlm.WavLM import WavLM as WavLMOrig
from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig
from transformers import WavLMConfig, WavLMModel, logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn.grep_linear': 'encoder.layers.*.attention.gru_rel_pos_linear',
'self_attn.relative_attention_bias': 'encoder.layers.*.attention.rel_attn_embed',
'self_attn.grep_a': 'encoder.layers.*.attention.gru_rel_pos_const',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'ctc_proj',
'mask_emb': 'masked_spec_embed',
}
TOP_LEVEL_KEYS = [
'ctc_proj',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    """simple docstring"""
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    assert hf_shape == value.shape, (
        f'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
        f''' {value.shape} for {full_name}'''
    )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(f'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''')
def recursively_load_weights(fairseq_model, hf_model):
    """simple docstring"""
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group", )
            is_used = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
A_ = True
if "*" in mapped_key:
A_ = name.split(__UpperCamelCase )[0].split("." )[-2]
A_ = mapped_key.replace("*" ,__UpperCamelCase )
if "weight_g" in name:
A_ = "weight_g"
elif "weight_v" in name:
A_ = "weight_v"
elif "bias" in name and "relative_attention_bias" not in name:
A_ = "bias"
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
A_ = "weight"
else:
A_ = None
set_recursively(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase )
continue
if not is_used:
            unused_weights.append(name)
logger.warning(f'''Unused weights: {unused_weights}''' )
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """simple docstring"""
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
)
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
)
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
f'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'''
" found."
)
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'''
)
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_wavlm_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None):
    """simple docstring"""
    checkpoint = torch.load(checkpoint_path)
    cfg = WavLMConfigOrig(checkpoint["cfg"])
    model = WavLMOrig(cfg)
    model.load_state_dict(checkpoint["model"])
    model.eval()
    if config_path is not None:
        config = WavLMConfig.from_pretrained(config_path)
    else:
        config = WavLMConfig()
    hf_wavlm = WavLMModel(config)
    recursively_load_weights(model, hf_wavlm)
    hf_wavlm.save_pretrained(pytorch_dump_folder_path)
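# A tiny illustration of how MAPPING drives the renaming performed by
# recursively_load_weights above: the "*" wildcard is filled with the layer
# index parsed out of the fairseq parameter name (toy name below):
#   >>> name = "encoder.layers.3.fc1.weight"
#   >>> layer_index = name.split("fc1")[0].split(".")[-2]   # '3'
#   >>> MAPPING["fc1"].replace("*", layer_index)
#   'encoder.layers.3.feed_forward.intermediate_dense'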
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
    args = parser.parse_args()
    convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
| 86 |
from ..utils import DummyObject, requires_backends
class _a ( metaclass=DummyObject ):
    """simple docstring"""
    _backends = ['torch', 'transformers', 'onnx']
def __init__( self : str , *UpperCAmelCase : int , **UpperCAmelCase : List[Any] ):
requires_backends(self , ["torch", "transformers", "onnx"] )
@classmethod
def __A ( cls : Dict , *UpperCAmelCase : Dict , **UpperCAmelCase : Union[str, Any] ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
@classmethod
def __A ( cls : Optional[Any] , *UpperCAmelCase : List[Any] , **UpperCAmelCase : Union[str, Any] ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
class _a ( metaclass=DummyObject ):
    """simple docstring"""
    _backends = ['torch', 'transformers', 'onnx']
def __init__( self : Optional[int] , *UpperCAmelCase : List[str] , **UpperCAmelCase : int ):
requires_backends(self , ["torch", "transformers", "onnx"] )
@classmethod
def __A ( cls : List[str] , *UpperCAmelCase : List[str] , **UpperCAmelCase : List[Any] ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
@classmethod
def __A ( cls : Optional[Any] , *UpperCAmelCase : Dict , **UpperCAmelCase : Optional[Any] ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
class _a ( metaclass=DummyObject ):
    """simple docstring"""
    _backends = ['torch', 'transformers', 'onnx']
def __init__( self : Union[str, Any] , *UpperCAmelCase : Any , **UpperCAmelCase : Dict ):
requires_backends(self , ["torch", "transformers", "onnx"] )
@classmethod
def __A ( cls : Tuple , *UpperCAmelCase : Dict , **UpperCAmelCase : str ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
@classmethod
def __A ( cls : Dict , *UpperCAmelCase : List[str] , **UpperCAmelCase : List[str] ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
class _a ( metaclass=DummyObject ):
    """simple docstring"""
    _backends = ['torch', 'transformers', 'onnx']
def __init__( self : List[str] , *UpperCAmelCase : Dict , **UpperCAmelCase : Optional[Any] ):
requires_backends(self , ["torch", "transformers", "onnx"] )
@classmethod
def __A ( cls : Any , *UpperCAmelCase : Union[str, Any] , **UpperCAmelCase : Dict ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
@classmethod
def __A ( cls : Optional[Any] , *UpperCAmelCase : int , **UpperCAmelCase : Optional[int] ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
class _a ( metaclass=DummyObject ):
    """simple docstring"""
    _backends = ['torch', 'transformers', 'onnx']
def __init__( self : List[str] , *UpperCAmelCase : str , **UpperCAmelCase : int ):
requires_backends(self , ["torch", "transformers", "onnx"] )
@classmethod
def __A ( cls : List[str] , *UpperCAmelCase : Optional[int] , **UpperCAmelCase : Optional[int] ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
@classmethod
def __A ( cls : Optional[int] , *UpperCAmelCase : int , **UpperCAmelCase : List[Any] ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
class _a ( metaclass=DummyObject ):
    """simple docstring"""
    _backends = ['torch', 'transformers', 'onnx']
def __init__( self : Tuple , *UpperCAmelCase : List[Any] , **UpperCAmelCase : Optional[Any] ):
requires_backends(self , ["torch", "transformers", "onnx"] )
@classmethod
def __A ( cls : Optional[int] , *UpperCAmelCase : Dict , **UpperCAmelCase : str ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
@classmethod
def __A ( cls : Optional[int] , *UpperCAmelCase : Optional[Any] , **UpperCAmelCase : int ):
requires_backends(cls , ["torch", "transformers", "onnx"] ) | 86 | 1 |
from __future__ import annotations
from typing import TypedDict
class BWTTransformDict(TypedDict):
    bwt_string: str
    idx_original_string: int
def all_rotations(s: str) -> list[str]:
    """simple docstring"""
    if not isinstance(s, str):
        raise TypeError("The parameter s type must be str.")
    return [s[i:] + s[:i] for i in range(len(s))]
def bwt_transform(s: str) -> BWTTransformDict:
    """simple docstring"""
    if not isinstance(s, str):
        raise TypeError("The parameter s type must be str.")
    if not s:
        raise ValueError("The parameter s must not be empty.")
    rotations = all_rotations(s)
    rotations.sort()  # sort the list of rotations in alphabetically order
    # make a string composed of the last char of each rotation
    response: BWTTransformDict = {
        "bwt_string": "".join([word[-1] for word in rotations]),
        "idx_original_string": rotations.index(s),
    }
    return response
def reverse_bwt(bwt_string: str, idx_original_string: int) -> str:
    """simple docstring"""
    if not isinstance(bwt_string, str):
        raise TypeError("The parameter bwt_string type must be str.")
    if not bwt_string:
        raise ValueError("The parameter bwt_string must not be empty.")
    try:
        idx_original_string = int(idx_original_string)
    except ValueError:
        raise TypeError(
            "The parameter idx_original_string type must be int or passive"
            " of cast to int.")
    if idx_original_string < 0:
        raise ValueError("The parameter idx_original_string must not be lower than 0.")
    if idx_original_string >= len(bwt_string):
        raise ValueError(
            "The parameter idx_original_string must be lower than" " len(bwt_string).")
    ordered_rotations = [""] * len(bwt_string)
    for _ in range(len(bwt_string)):
        for i in range(len(bwt_string)):
            ordered_rotations[i] = bwt_string[i] + ordered_rotations[i]
        ordered_rotations.sort()
    return ordered_rotations[idx_original_string]
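# Worked example of the round trip implemented above:
#   >>> bwt_transform("banana")
#   {'bwt_string': 'nnbaaa', 'idx_original_string': 3}
#   >>> reverse_bwt("nnbaaa", 3)
#   'banana'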
if __name__ == "__main__":
    entry_msg = 'Provide a string that I will generate its BWT transform: '
    s = input(entry_msg).strip()
    result = bwt_transform(s)
print(
F"Burrows Wheeler transform for string '{s}' results "
F"in '{result['bwt_string']}'"
)
    original_string = reverse_bwt(result['bwt_string'], result['idx_original_string'])
print(
F"Reversing Burrows Wheeler transform for entry '{result['bwt_string']}' "
F"we get original string '{original_string}'"
    )
| 86 |
import torch
from diffusers import DDPMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class DDPMParallelSchedulerTest(SchedulerCommonTest):
    """simple docstring"""
    scheduler_classes = (DDPMParallelScheduler,)
    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0_001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "variance_type": "fixed_small",
            "clip_sample": True,
        }
        config.update(**kwargs)
        return config
    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)
    def test_betas(self):
        for beta_start, beta_end in zip([0.0_001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)
    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)
    def test_variance_type(self):
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=variance)
    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)
    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True, prediction_type=prediction_type, sample_max_value=threshold, )
    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)
    def test_time_indices(self):
        for t in [0, 500, 999]:
            self.check_over_forward(time_step=t)
    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1E-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.00_979)) < 1E-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.02)) < 1E-5
    def test_batch_step_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        num_trained_timesteps = len(scheduler)
        model = self.dummy_model()
        sample1 = self.dummy_sample_deter
        sample2 = self.dummy_sample_deter + 0.1
        sample3 = self.dummy_sample_deter - 0.1
        per_sample_batch = sample1.shape[0]
        samples = torch.stack([sample1, sample2, sample3], dim=0)
        timesteps = torch.arange(num_trained_timesteps)[0:3, None].repeat(1, per_sample_batch)
        residual = model(samples.flatten(0, 1), timesteps.flatten(0, 1))
        pred_prev_sample = scheduler.batch_step_no_noise(residual, timesteps.flatten(0, 1), samples.flatten(0, 1))
        result_sum = torch.sum(torch.abs(pred_prev_sample))
        result_mean = torch.mean(torch.abs(pred_prev_sample))
        assert abs(result_sum.item() - 1_153.1_833) < 1E-2
        assert abs(result_mean.item() - 0.5_005) < 1E-3
    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        num_trained_timesteps = len(scheduler)
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)
        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 258.9_606) < 1E-2
        assert abs(result_mean.item() - 0.3_372) < 1E-3
    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)
        num_trained_timesteps = len(scheduler)
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)
        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 202.0_296) < 1E-2
        assert abs(result_mean.item() - 0.2_631) < 1E-3
    def test_custom_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [100, 87, 50, 1, 0]
        scheduler.set_timesteps(timesteps=timesteps)
        scheduler_timesteps = scheduler.timesteps
        for i, timestep in enumerate(scheduler_timesteps):
            if i == len(scheduler_timesteps) - 1:
                expected_prev_t = -1
            else:
                expected_prev_t = timesteps[i + 1]
            prev_t = scheduler.previous_timestep(timestep)
            prev_t = prev_t.item()
            self.assertEqual(prev_t, expected_prev_t)
    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [100, 87, 50, 51, 0]
        with self.assertRaises(ValueError, msg="`custom_timesteps` must be in descending order."):
            scheduler.set_timesteps(timesteps=timesteps)
    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [100, 87, 50, 1, 0]
        num_inference_steps = len(timesteps)
        with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `custom_timesteps`."):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)
    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [scheduler.config.num_train_timesteps]
        with self.assertRaises(
            ValueError, msg="`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}", ):
            scheduler.set_timesteps(timesteps=timesteps)
| 86 | 1 |
def nand_gate(input_1: int, input_2: int) -> int:
    """simple docstring"""
    return int((input_1, input_2).count(0) != 0)
def test_nand_gate() -> None:
"""simple docstring"""
assert nand_gate(0 ,0 ) == 1
assert nand_gate(0 ,1 ) == 1
assert nand_gate(1 ,0 ) == 1
assert nand_gate(1 ,1 ) == 0
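# NAND is functionally complete; for instance, XOR can be built from four NAND
# gates (a sketch added for illustration, not part of the original module):
def xor_gate(input_1: int, input_2: int) -> int:
    carry = nand_gate(input_1, input_2)
    return nand_gate(nand_gate(input_1, carry), nand_gate(input_2, carry))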
if __name__ == "__main__":
print(nand_gate(0, 0))
print(nand_gate(0, 1))
print(nand_gate(1, 0))
print(nand_gate(1, 1)) | 86 |
import argparse
import json
import os
import torch
from transformers import LukeConfig, LukeModel, LukeTokenizer, RobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def convert_luke_checkpoint(checkpoint_path, metadata_path, entity_vocab_path, pytorch_dump_folder_path, model_size):
    """simple docstring"""
    with open(metadata_path) as metadata_file:
        metadata = json.load(metadata_file)
    config = LukeConfig(use_entity_aware_attention=True, **metadata["model_config"])
    # Load in the weights from the checkpoint_path
    state_dict = torch.load(checkpoint_path, map_location="cpu")
    # Load the entity vocab file
    entity_vocab = load_entity_vocab(entity_vocab_path)
    tokenizer = RobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"])
    # Add special tokens to the token vocabulary for downstream tasks
    entity_token_1 = AddedToken("<ent>", lstrip=False, rstrip=False)
    entity_token_2 = AddedToken("<ent2>", lstrip=False, rstrip=False)
    tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_1, entity_token_2]})
    config.vocab_size += 2
    print(f'''Saving tokenizer to {pytorch_dump_folder_path}''')
    tokenizer.save_pretrained(pytorch_dump_folder_path)
    with open(os.path.join(pytorch_dump_folder_path, LukeTokenizer.vocab_files_names["entity_vocab_file"]), "w") as f:
        json.dump(entity_vocab, f)
    tokenizer = LukeTokenizer.from_pretrained(pytorch_dump_folder_path)
    # Initialize the embeddings of the special tokens
    word_emb = state_dict["embeddings.word_embeddings.weight"]
    ent_emb = word_emb[tokenizer.convert_tokens_to_ids(["@"])[0]].unsqueeze(0)
    ent2_emb = word_emb[tokenizer.convert_tokens_to_ids(["#"])[0]].unsqueeze(0)
    state_dict["embeddings.word_embeddings.weight"] = torch.cat([word_emb, ent_emb, ent2_emb])
    # Initialize the query layers of the entity-aware self-attention mechanism
    for layer_index in range(config.num_hidden_layers):
        for matrix_name in ["query.weight", "query.bias"]:
            prefix = f'''encoder.layer.{layer_index}.attention.self.'''
            state_dict[prefix + "w2e_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2w_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2e_" + matrix_name] = state_dict[prefix + matrix_name]
    # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
    entity_emb = state_dict["entity_embeddings.entity_embeddings.weight"]
    entity_emb[entity_vocab["[MASK2]"]] = entity_emb[entity_vocab["[MASK]"]]
    model = LukeModel(config=config).eval()
    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    if not (len(missing_keys) == 1 and missing_keys[0] == "embeddings.position_ids"):
        raise ValueError(f'''Missing keys {", ".join(missing_keys)}. Expected only missing embeddings.position_ids''')
    if not (all(key.startswith("entity_predictions") or key.startswith("lm_head") for key in unexpected_keys)):
        raise ValueError(
            "Unexpected keys"
            f''' {", ".join([key for key in unexpected_keys if not (key.startswith("entity_predictions") or key.startswith("lm_head"))])}''')
    # Check outputs
    tokenizer = LukeTokenizer.from_pretrained(pytorch_dump_folder_path, task="entity_classification")
    text = (
        "Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped the"
        " new world number one avoid a humiliating second- round exit at Wimbledon ."
    )
    span = (39, 42)
    encoding = tokenizer(text, entity_spans=[span], add_prefix_space=True, return_tensors="pt")
    outputs = model(**encoding)
    # Verify word hidden states
    if model_size == "large":
        expected_shape = torch.Size((1, 42, 1024))
        expected_slice = torch.tensor(
            [[0.0133, 0.0865, 0.0095], [0.3093, -0.2576, -0.7418], [-0.1720, -0.2117, -0.2869]])
    else:  # base
        expected_shape = torch.Size((1, 42, 768))
        expected_slice = torch.tensor([[0.0037, 0.1368, -0.0091], [0.1099, 0.3329, -0.1095], [0.0765, 0.5335, 0.1179]])
    if not (outputs.last_hidden_state.shape == expected_shape):
        raise ValueError(
            f'''Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}''')
    if not torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1E-4):
        raise ValueError
    # Verify entity hidden states
    if model_size == "large":
        expected_shape = torch.Size((1, 1, 1024))
        expected_slice = torch.tensor([[0.0466, -0.0106, -0.0179]])
    else:  # base
        expected_shape = torch.Size((1, 1, 768))
        expected_slice = torch.tensor([[0.1457, 0.1044, 0.0174]])
    if not (outputs.entity_last_hidden_state.shape == expected_shape):
        raise ValueError(
            f'''Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is'''
            f''' {expected_shape}''')
    if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3], expected_slice, atol=1E-4):
        raise ValueError
    # Finally, save our PyTorch model and tokenizer
    print("Saving PyTorch model to {}".format(pytorch_dump_folder_path))
    model.save_pretrained(pytorch_dump_folder_path)
def load_entity_vocab(entity_vocab_path):
    """simple docstring"""
    entity_vocab = {}
    with open(entity_vocab_path, "r", encoding="utf-8") as f:
        for index, line in enumerate(f):
            title, _ = line.rstrip().split("\t")
            entity_vocab[title] = index
    return entity_vocab
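# The entity vocab consumed above is a TSV file with one "<title>\t<id>" pair per
# line; load_entity_vocab keys titles by their line index. A tiny illustration
# (file contents hypothetical):
#   >>> open("entity_vocab.tsv").read()
#   '[PAD]\t0\n[UNK]\t1\n[MASK]\t2\n'
#   >>> load_entity_vocab("entity_vocab.tsv")
#   {'[PAD]': 0, '[UNK]': 1, '[MASK]': 2}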
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--checkpoint_path', type=str, help='Path to a pytorch_model.bin file.')
parser.add_argument(
'--metadata_path', default=None, type=str, help='Path to a metadata.json file, defining the configuration.'
)
parser.add_argument(
'--entity_vocab_path',
default=None,
type=str,
help='Path to an entity_vocab.tsv file, containing the entity vocabulary.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to where to dump the output PyTorch model.'
)
parser.add_argument(
'--model_size', default='base', type=str, choices=['base', 'large'], help='Size of the model to be converted.'
)
    args = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
    )
| 86 | 1 |
from collections import Counter
from pathlib import Path
from typing import Optional, Tuple
import yaml
class _NoDuplicateSafeLoader( yaml.SafeLoader ):
    """simple docstring"""
    def _check_no_duplicates_on_constructed_node(self, node):
        keys = [self.constructed_objects[key_node] for key_node, _ in node.value]
        keys = [tuple(key) if isinstance(key, list) else key for key in keys]
        counter = Counter(keys)
        duplicate_keys = [key for key in counter if counter[key] > 1]
        if duplicate_keys:
            raise TypeError(f'''Got duplicate yaml keys: {duplicate_keys}''')
    def construct_mapping(self, node, deep=False):
        mapping = super().construct_mapping(node, deep=deep)
        self._check_no_duplicates_on_constructed_node(node)
        return mapping
def _split_yaml_from_readme(readme_content: str):
    """simple docstring"""
    full_content = list(readme_content.splitlines())
    if full_content and full_content[0] == "---" and "---" in full_content[1:]:
        sep_idx = full_content[1:].index("---") + 1
        yamlblock = "\n".join(full_content[1:sep_idx])
        return yamlblock, "\n".join(full_content[sep_idx + 1 :])
    return None, "\n".join(full_content)
class DatasetMetadata( dict ):
    """simple docstring"""
    _FIELDS_WITH_DASHES = {'train_eval_index'}  # train-eval-index in the YAML metadata
    @classmethod
    def from_readme(cls, path: Path) -> "DatasetMetadata":
        with open(path, encoding="utf-8") as readme_file:
            yaml_string, _ = _split_yaml_from_readme(readme_file.read())
        if yaml_string is not None:
            return cls.from_yaml_string(yaml_string)
        else:
            return cls()
    def to_readme(self, path: Path):
        if path.exists():
            with open(path, encoding="utf-8") as readme_file:
                readme_content = readme_file.read()
        else:
            readme_content = None
        updated_readme_content = self._to_readme(readme_content)
        with open(path, "w", encoding="utf-8") as readme_file:
            readme_file.write(updated_readme_content)
    def _to_readme(self, readme_content: Optional[str] = None) -> str:
        if readme_content is not None:
            _, content = _split_yaml_from_readme(readme_content)
            full_content = "---\n" + self.to_yaml_string() + "---\n" + content
        else:
            full_content = "---\n" + self.to_yaml_string() + "---\n"
        return full_content
    @classmethod
    def from_yaml_string(cls, string: str) -> "DatasetMetadata":
        metadata_dict = yaml.load(string, Loader=_NoDuplicateSafeLoader) or {}
        # Convert the YAML keys to DatasetMetadata fields
        metadata_dict = {
            (key.replace("-", "_") if key.replace("-", "_") in cls._FIELDS_WITH_DASHES else key): value
            for key, value in metadata_dict.items()
        }
        return cls(**metadata_dict)
    def to_yaml_string(self) -> str:
        return yaml.safe_dump(
            {
                (key.replace("_", "-") if key in self._FIELDS_WITH_DASHES else key): value
                for key, value in self.items()
            }, sort_keys=False, allow_unicode=True, encoding="utf-8", ).decode("utf-8")
__a :Optional[Any] = {
'image-classification': [],
'translation': [],
'image-segmentation': [],
'fill-mask': [],
'automatic-speech-recognition': [],
'token-classification': [],
'sentence-similarity': [],
'audio-classification': [],
'question-answering': [],
'summarization': [],
'zero-shot-classification': [],
'table-to-text': [],
'feature-extraction': [],
'other': [],
'multiple-choice': [],
'text-classification': [],
'text-to-image': [],
'text2text-generation': [],
'zero-shot-image-classification': [],
'tabular-classification': [],
'tabular-regression': [],
'image-to-image': [],
'tabular-to-text': [],
'unconditional-image-generation': [],
'text-retrieval': [],
'text-to-speech': [],
'object-detection': [],
'audio-to-audio': [],
'text-generation': [],
'conversational': [],
'table-question-answering': [],
'visual-question-answering': [],
'image-to-text': [],
'reinforcement-learning': [],
'voice-activity-detection': [],
'time-series-forecasting': [],
'document-question-answering': [],
}
if __name__ == "__main__":
from argparse import ArgumentParser
    ap = ArgumentParser(usage='Validate the yaml metadata block of a README.md file.')
    ap.add_argument('readme_filepath')
    args = ap.parse_args()
    readme_filepath = Path(args.readme_filepath)
    dataset_metadata = DatasetMetadata.from_readme(readme_filepath)
    print(dataset_metadata)
    dataset_metadata.to_readme(readme_filepath)
| 86 |
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
__a :Optional[Any] = 'true'
def get_basic_setup(accelerator, num_samples=82, batch_size=16):
    """simple docstring"""
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=num_samples)
    dataloader = DataLoader(dset, batch_size=batch_size)
    model.to(accelerator.device)
    ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    return model, ddp_model, dataloader
def get_dataloader(accelerator: Accelerator, use_longest=False):
    """simple docstring"""
    tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/mrpc-bert-base-cased")
    dataset = load_dataset("glue", "mrpc", split="validation")
    def tokenize_function(examples):
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs
    with accelerator.main_process_first():
        tokenized_datasets = dataset.map(
            tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"],)
        tokenized_datasets = tokenized_datasets.rename_column("label", "labels")
    def collate_fn(examples):
        if use_longest:
            return tokenizer.pad(examples, padding="longest", return_tensors="pt")
        return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
    return DataLoader(tokenized_datasets, shuffle=False, collate_fn=collate_fn, batch_size=16)
def get_mrpc_setup(dispatch_batches, split_batches):
    """simple docstring"""
    accelerator = Accelerator(dispatch_batches=dispatch_batches, split_batches=split_batches)
    dataloader = get_dataloader(accelerator, not dispatch_batches)
    model = AutoModelForSequenceClassification.from_pretrained(
        "hf-internal-testing/mrpc-bert-base-cased", return_dict=True)
    ddp_model, ddp_dataloader = accelerator.prepare(model, dataloader)
    return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def generate_predictions(model, dataloader, accelerator):
    """simple docstring"""
    logits_and_targets = []
    for batch in dataloader:
        input, target = batch.values()
        with torch.no_grad():
            logit = model(input)
            logit, target = accelerator.gather_for_metrics((logit, target))
            logits_and_targets.append((logit, target))
    logits, targs = [], []
    for logit, targ in logits_and_targets:
        logits.append(logit)
        targs.append(targ)
    logits, targs = torch.cat(logits), torch.cat(targs)
    return logits, targs
def test_torch_metrics(accelerator: Accelerator, num_samples=82, dispatch_batches=False, split_batches=False, batch_size=16):
    """simple docstring"""
    model, ddp_model, dataloader = get_basic_setup(accelerator, num_samples, batch_size)
    logits, _ = generate_predictions(ddp_model, dataloader, accelerator)
    assert (
        len(logits) == num_samples
    ), f'''Unexpected number of inputs:\n    Expected: {num_samples}\n    Actual: {len(logits)}'''
def test_mrpc(dispatch_batches: bool = False, split_batches: bool = False):
    """simple docstring"""
    metric = evaluate.load("glue", "mrpc")
    setup, accelerator = get_mrpc_setup(dispatch_batches, split_batches)
    # First do baseline
    model, dataloader, device = setup["no"]
    model.to(device)
    model.eval()
    for batch in dataloader:
        batch.to(device)
        with torch.inference_mode():
            outputs = model(**batch)
            preds = outputs.logits.argmax(dim=-1)
        metric.add_batch(predictions=preds, references=batch["labels"])
    baseline = metric.compute()
    # Then do distributed
    model, dataloader, _ = setup["ddp"]
    model.eval()
    for batch in dataloader:
        with torch.inference_mode():
            outputs = model(**batch)
            preds = outputs.logits.argmax(dim=-1)
            references = batch["labels"]
            preds, references = accelerator.gather_for_metrics((preds, references))
        metric.add_batch(predictions=preds, references=references)
    distributed = metric.compute()
    for key in "accuracy f1".split():
        assert math.isclose(
            baseline[key], distributed[key]
        ), f'''Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n'''
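# Why gather_for_metrics matters, in one arithmetic sketch (illustrative numbers,
# not from this file): with 2 processes and 5 samples, the distributed sampler
# pads the dataset to 6, so a plain gather would double-count one sample;
# gather_for_metrics drops the padding duplicates on the last batch.
#   >>> import math
#   >>> math.ceil(5 / 2) * 2 - 5   # samples double-counted without trimming
#   1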
def main():
    """simple docstring"""
    accelerator = Accelerator(split_batches=False, dispatch_batches=False)
    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_warning()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()
    # These are a bit slower so they should only be ran on the GPU or TPU
    if torch.cuda.is_available() or is_tpu_available():
        if accelerator.is_local_main_process:
            print("**Testing gather_for_metrics**")
        for split_batches in [True, False]:
            for dispatch_batches in [True, False]:
                if accelerator.is_local_main_process:
                    print(f'''With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`''')
                test_mrpc(dispatch_batches, split_batches)
                accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test torch metrics**")
    for split_batches in [True, False]:
        for dispatch_batches in [True, False]:
            accelerator = Accelerator(split_batches=split_batches, dispatch_batches=dispatch_batches)
            if accelerator.is_local_main_process:
                print(f'''With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99''')
            test_torch_metrics(accelerator, 99)
            accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test last batch is not dropped when perfectly divisible**")
    accelerator = Accelerator()
    test_torch_metrics(accelerator, 512)
    accelerator.state._reset_state()
def _mp_fn(index):
    """simple docstring"""
    main()
if __name__ == "__main__":
    main()
| 86 | 1 |
def partition(m: int) -> int:
    """simple docstring"""
    memo = [[0 for _ in range(m)] for _ in range(m + 1)]
    for i in range(m + 1):
        memo[i][0] = 1
for n in range(m + 1 ):
for k in range(1 ,__UpperCamelCase ):
memo[n][k] += memo[n][k - 1]
if n - k > 0:
memo[n][k] += memo[n - k - 1][k]
return memo[m][m - 1]
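# partition(n) counts the integer partitions of n; for example the 7 partitions
# of 5 are 5, 4+1, 3+2, 3+1+1, 2+2+1, 2+1+1+1 and 1+1+1+1+1:
#   >>> partition(5)
#   7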
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
try:
            n = int(input('Enter a number: ').strip())
print(partition(n))
except ValueError:
print('Please enter a number.')
else:
try:
            n = int(sys.argv[1])
print(partition(n))
except ValueError:
            print('Please pass a number.')
| 86 |
import argparse
import os
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
TRANSFORMERS_PATH = 'src/transformers'
PATH_TO_TASK_GUIDES = 'docs/source/en/tasks'
def _find_text_in_file(filename, start_prompt, end_prompt):
    """simple docstring"""
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt):
        start_index += 1
    start_index += 1
    end_index = start_index
    while not lines[end_index].startswith(end_prompt):
        end_index += 1
    end_index -= 1
    while len(lines[start_index]) <= 1:
        start_index += 1
    while len(lines[end_index]) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index]), start_index, end_index, lines
# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)
TASK_GUIDE_TO_MODELS = {
'asr.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
'audio_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
'language_modeling.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
'image_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
'masked_language_modeling.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
'multiple_choice.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
'object_detection.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
'question_answering.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
'semantic_segmentation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
'sequence_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
'summarization.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'token_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
'translation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'video_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
'document_question_answering.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
'monocular_depth_estimation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}
# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
SPECIAL_TASK_GUIDE_TO_MODEL_TYPES = {
'summarization.md': ('nllb',),
'translation.md': ('nllb',),
}
def __snake_case ( __UpperCamelCase : Tuple ):
"""simple docstring"""
A_ = TASK_GUIDE_TO_MODELS[task_guide]
A_ = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(__UpperCamelCase ,set() )
A_ = {
code: name
for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if (code in model_mapping_names or code in special_model_types)
}
return ", ".join([f'''[{name}](../model_doc/{code})''' for code, name in model_names.items()] ) + "\n"
def __snake_case ( __UpperCamelCase : str ,__UpperCamelCase : List[str]=False ):
"""simple docstring"""
A_ , A_ , A_ , A_ = _find_text_in_file(
filename=os.path.join(__UpperCamelCase ,__UpperCamelCase ) ,start_prompt="<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->" ,end_prompt="<!--End of the generated tip-->" ,)
A_ = get_model_list_for_task(__UpperCamelCase )
if current_list != new_list:
if overwrite:
with open(os.path.join(__UpperCamelCase ,__UpperCamelCase ) ,"w" ,encoding="utf-8" ,newline="\n" ) as f:
f.writelines(lines[:start_index] + [new_list] + lines[end_index:] )
else:
raise ValueError(
f'''The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`'''
" to fix this." )
if __name__ == "__main__":
__a :int = argparse.ArgumentParser()
parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
__a :Optional[Any] = parser.parse_args()
for task_guide in TASK_GUIDE_TO_MODELS.keys():
check_model_list_for_task(task_guide, args.fix_and_overwrite) | 86 | 1 |
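# --- Hedged sketch: the prompt-delimited extraction used by the checker above.
# `find_block` is a hypothetical standalone name; the real helper also trims
# blank lines around the block and reads from a file rather than a list.
def find_block(lines, start_prompt, end_prompt):
    start_index = 0
    while not lines[start_index].startswith(start_prompt):
        start_index += 1
    start_index += 1  # skip the start-prompt line itself
    end_index = start_index
    while not lines[end_index].startswith(end_prompt):
        end_index += 1
    return "".join(lines[start_index:end_index]), start_index, end_index

demo = ["header\n", "<!--start-->\n", "generated line\n", "<!--end-->\n"]
block, start, end = find_block(demo, "<!--start-->", "<!--end-->")
assert block == "generated line\n" and (start, end) == (2, 3)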
import math
from typing import Callable, List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
from diffusers.schedulers import DDIMScheduler, DDPMScheduler, LMSDiscreteScheduler, PNDMScheduler
def __snake_case ( __UpperCamelCase : List[Any] ,__UpperCamelCase : str ,__UpperCamelCase : Tuple=[] ):
"""simple docstring"""
A_ = size[0] - overlap_pixels * 2
A_ = size[1] - overlap_pixels * 2
for letter in ["l", "r"]:
if letter in remove_borders:
size_x += overlap_pixels
for letter in ["t", "b"]:
if letter in remove_borders:
size_y += overlap_pixels
A_ = np.ones((size_y, size_x) ,dtype=np.uinta ) * 255
A_ = np.pad(__UpperCamelCase ,mode="linear_ramp" ,pad_width=__UpperCamelCase ,end_values=0 )
if "l" in remove_borders:
A_ = mask[:, overlap_pixels : mask.shape[1]]
if "r" in remove_borders:
A_ = mask[:, 0 : mask.shape[1] - overlap_pixels]
if "t" in remove_borders:
A_ = mask[overlap_pixels : mask.shape[0], :]
if "b" in remove_borders:
A_ = mask[0 : mask.shape[0] - overlap_pixels, :]
return mask
def __snake_case ( __UpperCamelCase : List[str] ,__UpperCamelCase : Dict ,__UpperCamelCase : Dict ):
"""simple docstring"""
return max(__UpperCamelCase ,min(__UpperCamelCase ,__UpperCamelCase ) )
def __snake_case ( __UpperCamelCase : [int] ,__UpperCamelCase : [int] ,__UpperCamelCase : [int] ):
"""simple docstring"""
return (
clamp(rect[0] ,min[0] ,max[0] ),
clamp(rect[1] ,min[1] ,max[1] ),
clamp(rect[2] ,min[0] ,max[0] ),
clamp(rect[3] ,min[1] ,max[1] ),
)
def __snake_case ( __UpperCamelCase : [int] ,__UpperCamelCase : int ,__UpperCamelCase : [int] ):
"""simple docstring"""
A_ = list(__UpperCamelCase )
rect[0] -= overlap
rect[1] -= overlap
rect[2] += overlap
rect[3] += overlap
A_ = clamp_rect(__UpperCamelCase ,[0, 0] ,[image_size[0], image_size[1]] )
return rect
def __snake_case ( __UpperCamelCase : Optional[int] ,__UpperCamelCase : int ,__UpperCamelCase : List[Any] ,__UpperCamelCase : Tuple ):
"""simple docstring"""
A_ = Image.new("RGB" ,(tile.size[0] + original_slice, tile.size[1]) )
result.paste(
original_image.resize((tile.size[0], tile.size[1]) ,Image.BICUBIC ).crop(
(slice_x, 0, slice_x + original_slice, tile.size[1]) ) ,(0, 0) ,)
result.paste(__UpperCamelCase ,(original_slice, 0) )
return result
def __snake_case ( __UpperCamelCase : int ,__UpperCamelCase : Optional[Any] ):
"""simple docstring"""
A_ = (original_image_slice * 4, 0, tile.size[0], tile.size[1])
A_ = tile.crop(__UpperCamelCase )
return tile
def __snake_case ( __UpperCamelCase : List[Any] ,__UpperCamelCase : Dict ):
"""simple docstring"""
A_ = n % d
return n - divisor
class _a ( snake_case_ ):
"""simple docstring"""
def __init__( self : Optional[int] , UpperCAmelCase : AutoencoderKL , UpperCAmelCase : CLIPTextModel , UpperCAmelCase : CLIPTokenizer , UpperCAmelCase : UNetaDConditionModel , UpperCAmelCase : DDPMScheduler , UpperCAmelCase : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , UpperCAmelCase : int = 350 , ):
super().__init__(
vae=UpperCAmelCase , text_encoder=UpperCAmelCase , tokenizer=UpperCAmelCase , unet=UpperCAmelCase , low_res_scheduler=UpperCAmelCase , scheduler=UpperCAmelCase , max_noise_level=UpperCAmelCase , )
def __A ( self : str , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : List[Any] , UpperCAmelCase : List[str] , UpperCAmelCase : Tuple , UpperCAmelCase : List[Any] , UpperCAmelCase : Tuple , **UpperCAmelCase : Tuple ):
torch.manual_seed(0 )
A_ = (
min(image.size[0] - (tile_size + original_image_slice) , x * tile_size ),
min(image.size[1] - (tile_size + original_image_slice) , y * tile_size ),
min(image.size[0] , (x + 1) * tile_size ),
min(image.size[1] , (y + 1) * tile_size ),
)
A_ = add_overlap_rect(UpperCAmelCase , UpperCAmelCase , image.size )
A_ = image.crop(UpperCAmelCase )
A_ = ((crop_rect[0] + ((crop_rect[2] - crop_rect[0]) / 2)) / image.size[0]) * tile.size[0]
A_ = translated_slice_x - (original_image_slice / 2)
A_ = max(0 , UpperCAmelCase )
A_ = squeeze_tile(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
A_ = to_input.size
A_ = to_input.resize((tile_size, tile_size) , Image.BICUBIC )
A_ = super(UpperCAmelCase , self ).__call__(image=UpperCAmelCase , **UpperCAmelCase ).images[0]
A_ = upscaled_tile.resize((orig_input_size[0] * 4, orig_input_size[1] * 4) , Image.BICUBIC )
A_ = unsqueeze_tile(UpperCAmelCase , UpperCAmelCase )
A_ = upscaled_tile.resize((tile.size[0] * 4, tile.size[1] * 4) , Image.BICUBIC )
A_ = []
if x == 0:
remove_borders.append("l" )
elif crop_rect[2] == image.size[0]:
remove_borders.append("r" )
if y == 0:
remove_borders.append("t" )
elif crop_rect[3] == image.size[1]:
remove_borders.append("b" )
A_ = Image.fromarray(
make_transparency_mask(
(upscaled_tile.size[0], upscaled_tile.size[1]) , tile_border * 4 , remove_borders=UpperCAmelCase ) , mode="L" , )
final_image.paste(
UpperCAmelCase , (crop_rect_with_overlap[0] * 4, crop_rect_with_overlap[1] * 4) , UpperCAmelCase )
@torch.no_grad()
def __call__( self : int , UpperCAmelCase : Union[str, List[str]] , UpperCAmelCase : Union[PIL.Image.Image, List[PIL.Image.Image]] , UpperCAmelCase : int = 75 , UpperCAmelCase : float = 9.0 , UpperCAmelCase : int = 50 , UpperCAmelCase : Optional[Union[str, List[str]]] = None , UpperCAmelCase : Optional[int] = 1 , UpperCAmelCase : float = 0.0 , UpperCAmelCase : Optional[torch.Generator] = None , UpperCAmelCase : Optional[torch.FloatTensor] = None , UpperCAmelCase : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , UpperCAmelCase : int = 1 , UpperCAmelCase : int = 128 , UpperCAmelCase : int = 32 , UpperCAmelCase : int = 32 , ):
A_ = Image.new("RGB" , (image.size[0] * 4, image.size[1] * 4) )
A_ = math.ceil(image.size[0] / tile_size )
A_ = math.ceil(image.size[1] / tile_size )
A_ = tcx * tcy
A_ = 0
for y in range(UpperCAmelCase ):
for x in range(UpperCAmelCase ):
self._process_tile(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , prompt=UpperCAmelCase , num_inference_steps=UpperCAmelCase , guidance_scale=UpperCAmelCase , noise_level=UpperCAmelCase , negative_prompt=UpperCAmelCase , num_images_per_prompt=UpperCAmelCase , eta=UpperCAmelCase , generator=UpperCAmelCase , latents=UpperCAmelCase , )
current_count += 1
if callback is not None:
callback({"progress": current_count / total_tile_count, "image": final_image} )
return final_image
def __snake_case ( ):
"""simple docstring"""
A_ = "stabilityai/stable-diffusion-x4-upscaler"
A_ = StableDiffusionTiledUpscalePipeline.from_pretrained(__UpperCamelCase ,revision="fp16" ,torch_dtype=torch.floataa )
A_ = pipe.to("cuda" )
A_ = Image.open("../../docs/source/imgs/diffusers_library.jpg" )
def callback(__UpperCamelCase : Optional[int] ):
print(f'''progress: {obj["progress"]:.4f}''' )
obj["image"].save("diffusers_library_progress.jpg" )
A_ = pipe(image=__UpperCamelCase ,prompt="Black font, white background, vector" ,noise_level=40 ,callback=__UpperCamelCase )
final_image.save("diffusers_library.jpg" )
if __name__ == "__main__":
main() | 86 |
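# --- Hedged sketch (numpy only): the linear-ramp transparency mask behind the
# tile blending above -- opaque interior, borders fading to zero so pasted
# tiles cross-fade in their overlap region. Toy size, no PIL.
import numpy as np

def ramp_mask(size, overlap):
    inner = np.ones((size - 2 * overlap, size - 2 * overlap), dtype=np.uint8) * 255
    return np.pad(inner, pad_width=overlap, mode="linear_ramp", end_values=0)

m = ramp_mask(8, 2)
assert m.shape == (8, 8)
assert m[4, 4] == 255 and m[0, 0] == 0  # opaque center, fully transparent corner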
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
__a :Dict = logging.get_logger(__name__)
def __snake_case ( __UpperCamelCase : Dict ,__UpperCamelCase : Tuple=False ):
"""simple docstring"""
A_ = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'''blocks.{i}.norm1.weight''', f'''vit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((f'''blocks.{i}.norm1.bias''', f'''vit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append((f'''blocks.{i}.attn.proj.weight''', f'''vit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((f'''blocks.{i}.attn.proj.bias''', f'''vit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((f'''blocks.{i}.norm2.weight''', f'''vit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((f'''blocks.{i}.norm2.bias''', f'''vit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((f'''blocks.{i}.mlp.fc1.weight''', f'''vit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((f'''blocks.{i}.mlp.fc1.bias''', f'''vit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((f'''blocks.{i}.mlp.fc2.weight''', f'''vit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((f'''blocks.{i}.mlp.fc2.bias''', f'''vit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
("cls_token", "vit.embeddings.cls_token"),
("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
("pos_embed", "vit.embeddings.position_embeddings"),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
("pre_logits.fc.weight", "pooler.dense.weight"),
("pre_logits.fc.bias", "pooler.dense.bias"),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
A_ = [(pair[0], pair[1][4:]) if pair[1].startswith("vit" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("norm.weight", "vit.layernorm.weight"),
("norm.bias", "vit.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
return rename_keys
def __snake_case ( __UpperCamelCase : Optional[int] ,__UpperCamelCase : Tuple ,__UpperCamelCase : Any=False ):
"""simple docstring"""
for i in range(config.num_hidden_layers ):
if base_model:
A_ = ""
else:
A_ = "vit."
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
A_ = state_dict.pop(f'''blocks.{i}.attn.qkv.weight''' )
A_ = state_dict.pop(f'''blocks.{i}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
A_ = in_proj_weight[
: config.hidden_size, :
]
A_ = in_proj_bias[: config.hidden_size]
A_ = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
A_ = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
A_ = in_proj_weight[
-config.hidden_size :, :
]
A_ = in_proj_bias[-config.hidden_size :]
def __snake_case ( __UpperCamelCase : List[Any] ):
"""simple docstring"""
A_ = ["head.weight", "head.bias"]
for k in ignore_keys:
state_dict.pop(__UpperCamelCase ,__UpperCamelCase )
def __snake_case ( __UpperCamelCase : Any ,__UpperCamelCase : Optional[int] ,__UpperCamelCase : List[str] ):
"""simple docstring"""
A_ = dct.pop(__UpperCamelCase )
A_ = val
def __snake_case ( ):
"""simple docstring"""
A_ = "http://images.cocodataset.org/val2017/000000039769.jpg"
A_ = Image.open(requests.get(__UpperCamelCase ,stream=__UpperCamelCase ).raw )
return im
@torch.no_grad()
def __snake_case ( __UpperCamelCase : Union[str, Any] ,__UpperCamelCase : Optional[int] ):
"""simple docstring"""
A_ = ViTConfig()
A_ = False
# dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
if vit_name[-5:] == "in21k":
A_ = True
A_ = int(vit_name[-12:-10] )
A_ = int(vit_name[-9:-6] )
else:
A_ = 1000
A_ = "huggingface/label-files"
A_ = "imagenet-1k-id2label.json"
A_ = json.load(open(hf_hub_download(__UpperCamelCase ,__UpperCamelCase ,repo_type="dataset" ) ,"r" ) )
A_ = {int(__UpperCamelCase ): v for k, v in idalabel.items()}
A_ = idalabel
A_ = {v: k for k, v in idalabel.items()}
A_ = int(vit_name[-6:-4] )
A_ = int(vit_name[-3:] )
# size of the architecture
if "deit" in vit_name:
if vit_name[9:].startswith("tiny" ):
A_ = 192
A_ = 768
A_ = 12
A_ = 3
elif vit_name[9:].startswith("small" ):
A_ = 384
A_ = 1536
A_ = 12
A_ = 6
else:
pass
else:
if vit_name[4:].startswith("small" ):
A_ = 768
A_ = 2304
A_ = 8
A_ = 8
elif vit_name[4:].startswith("base" ):
pass
elif vit_name[4:].startswith("large" ):
A_ = 1024
A_ = 4096
A_ = 24
A_ = 16
elif vit_name[4:].startswith("huge" ):
A_ = 1280
A_ = 5120
A_ = 32
A_ = 16
# load original model from timm
A_ = timm.create_model(__UpperCamelCase ,pretrained=__UpperCamelCase )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
A_ = timm_model.state_dict()
if base_model:
remove_classification_head_(__UpperCamelCase )
A_ = create_rename_keys(__UpperCamelCase ,__UpperCamelCase )
for src, dest in rename_keys:
rename_key(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase )
read_in_q_k_v(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase )
# load HuggingFace model
if vit_name[-5:] == "in21k":
A_ = ViTModel(__UpperCamelCase ).eval()
else:
A_ = ViTForImageClassification(__UpperCamelCase ).eval()
model.load_state_dict(__UpperCamelCase )
# Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor
if "deit" in vit_name:
A_ = DeiTImageProcessor(size=config.image_size )
else:
A_ = ViTImageProcessor(size=config.image_size )
A_ = image_processor(images=prepare_img() ,return_tensors="pt" )
A_ = encoding["pixel_values"]
A_ = model(__UpperCamelCase )
if base_model:
A_ = timm_model.forward_features(__UpperCamelCase )
assert timm_pooled_output.shape == outputs.pooler_output.shape
assert torch.allclose(__UpperCamelCase ,outputs.pooler_output ,atol=1E-3 )
else:
A_ = timm_model(__UpperCamelCase )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(__UpperCamelCase ,outputs.logits ,atol=1E-3 )
Path(__UpperCamelCase ).mkdir(exist_ok=__UpperCamelCase )
print(f'''Saving model {vit_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(__UpperCamelCase )
print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(__UpperCamelCase )
if __name__ == "__main__":
__a :str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--vit_name',
default='vit_base_patch16_224',
type=str,
help='Name of the ViT timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
__a :Optional[int] = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path) | 86 | 1 |
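# --- Hedged sketch: the fused-qkv split performed by read_in_q_k_v above.
# timm stores query/key/value as one (3 * hidden, hidden) matrix; slicing it
# in thirds recovers the three separate projections. Toy sizes only.
import torch

hidden = 4
in_proj_weight = torch.randn(3 * hidden, hidden)
in_proj_bias = torch.randn(3 * hidden)

q_w = in_proj_weight[:hidden, :]
k_w = in_proj_weight[hidden : 2 * hidden, :]
v_w = in_proj_weight[-hidden:, :]

assert torch.equal(torch.cat([q_w, k_w, v_w]), in_proj_weight)
assert torch.equal(in_proj_bias[-hidden:], in_proj_bias[2 * hidden :])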
from typing import List
from .keymap import KEYMAP, get_character
def __snake_case ( __UpperCamelCase : str ):
"""simple docstring"""
def decorator(__UpperCamelCase : Union[str, Any] ):
A_ = getattr(__UpperCamelCase ,"handle_key" ,[] )
handle += [key]
setattr(__UpperCamelCase ,"handle_key" ,__UpperCamelCase )
return func
return decorator
def __snake_case ( *__UpperCamelCase : List[str] ):
"""simple docstring"""
def decorator(__UpperCamelCase : str ):
A_ = getattr(__UpperCamelCase ,"handle_key" ,[] )
handle += keys
setattr(__UpperCamelCase ,"handle_key" ,__UpperCamelCase )
return func
return decorator
class _a ( snake_case_ ):
"""simple docstring"""
def __new__( cls : Dict , UpperCAmelCase : Dict , UpperCAmelCase : List[str] , UpperCAmelCase : List[str] ):
A_ = super().__new__(cls , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
if not hasattr(UpperCAmelCase , "key_handler" ):
setattr(UpperCAmelCase , "key_handler" , {} )
setattr(UpperCAmelCase , "handle_input" , KeyHandler.handle_input )
for value in attrs.values():
A_ = getattr(UpperCAmelCase , "handle_key" , [] )
for key in handled_keys:
A_ = value
return new_cls
@staticmethod
def __A ( cls : Optional[int] ):
A_ = get_character()
if char != KEYMAP["undefined"]:
A_ = ord(UpperCAmelCase )
A_ = cls.key_handler.get(UpperCAmelCase )
if handler:
A_ = char
return handler(cls )
else:
return None
def __snake_case ( cls : List[str] ):
"""simple docstring"""
return KeyHandler(cls.__name__ ,cls.__bases__ ,cls.__dict__.copy() ) | 86 |
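# --- Hypothetical usage sketch for the decorator/metaclass pattern above:
# methods tagged via a `handle_key` attribute get collected into a per-class
# `key_handler` dispatch table. Names below are illustrative; the originals
# are mangled in this snippet.
def mark(key):
    def decorator(func):
        func.handle_key = getattr(func, "handle_key", []) + [key]
        return func
    return decorator

class Menu:
    key_handler = {}

    @mark("q")
    def quit(self):
        return "quitting"

for attr in list(vars(Menu).values()):  # what the metaclass does at class creation
    for key in getattr(attr, "handle_key", []):
        Menu.key_handler[key] = attr

assert Menu.key_handler["q"](Menu()) == "quitting"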
def __snake_case ( __UpperCamelCase : int = 50 ):
"""simple docstring"""
A_ = [1] * (length + 1)
for row_length in range(length + 1 ):
for tile_length in range(2 ,5 ):
for tile_start in range(row_length - tile_length + 1 ):
ways_number[row_length] += ways_number[
row_length - tile_start - tile_length
]
return ways_number[length]
if __name__ == "__main__":
print(F"{solution() = }") | 86 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__a :Any = {
'configuration_clap': [
'CLAP_PRETRAINED_MODEL_ARCHIVE_LIST',
'ClapAudioConfig',
'ClapConfig',
'ClapTextConfig',
],
'processing_clap': ['ClapProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a :Any = [
'CLAP_PRETRAINED_MODEL_ARCHIVE_LIST',
'ClapModel',
'ClapPreTrainedModel',
'ClapTextModel',
'ClapTextModelWithProjection',
'ClapAudioModel',
'ClapAudioModelWithProjection',
]
__a :List[Any] = ['ClapFeatureExtractor']
if TYPE_CHECKING:
from .configuration_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioConfig,
ClapConfig,
ClapTextConfig,
)
from .processing_clap import ClapProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clap import ClapFeatureExtractor
from .modeling_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioModel,
ClapAudioModelWithProjection,
ClapModel,
ClapPreTrainedModel,
ClapTextModel,
ClapTextModelWithProjection,
)
else:
import sys
__a :str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 86 |
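# --- Rough standalone sketch of the lazy-import pattern above (assumption:
# plain PEP 562 module __getattr__, not transformers' actual _LazyModule):
# attribute access triggers the real import only on first use.
import importlib
import sys
import types

lazy = types.ModuleType("lazy_json")
lazy._import_structure = {"json": ["dumps", "loads"]}

def _lazy_getattr(name):
    for module_name, exports in lazy._import_structure.items():
        if name in exports:
            return getattr(importlib.import_module(module_name), name)
    raise AttributeError(name)

lazy.__getattr__ = _lazy_getattr  # PEP 562 honors a module-level __getattr__
sys.modules["lazy_json"] = lazy

import lazy_json
assert lazy_json.dumps({"a": 1}) == '{"a": 1}'  # json imported on demand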
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
__a :List[str] = logging.get_logger(__name__)
@add_end_docstrings(snake_case_ )
class _a ( snake_case_ ):
"""simple docstring"""
def __init__( self : Any , **UpperCAmelCase : List[str] ):
super().__init__(**UpperCAmelCase )
if self.framework == "tf":
raise ValueError(f'''The {self.__class__} is only available in PyTorch.''' )
requires_backends(self , "vision" )
self.check_model_type(UpperCAmelCase )
def __call__( self : Optional[int] , UpperCAmelCase : Union[str, "Image.Image", List[Dict[str, Any]]] , UpperCAmelCase : Union[str, List[str]] = None , **UpperCAmelCase : List[Any] , ):
if "text_queries" in kwargs:
A_ = kwargs.pop("text_queries" )
if isinstance(UpperCAmelCase , (str, Image.Image) ):
A_ = {"image": image, "candidate_labels": candidate_labels}
else:
A_ = image
A_ = super().__call__(UpperCAmelCase , **UpperCAmelCase )
return results
def __A ( self : int , **UpperCAmelCase : Tuple ):
A_ = {}
if "threshold" in kwargs:
A_ = kwargs["threshold"]
if "top_k" in kwargs:
A_ = kwargs["top_k"]
return {}, {}, postprocess_params
def __A ( self : List[str] , UpperCAmelCase : Dict ):
A_ = load_image(inputs["image"] )
A_ = inputs["candidate_labels"]
if isinstance(UpperCAmelCase , UpperCAmelCase ):
A_ = candidate_labels.split("," )
A_ = torch.tensor([[image.height, image.width]] , dtype=torch.intaa )
for i, candidate_label in enumerate(UpperCAmelCase ):
A_ = self.tokenizer(UpperCAmelCase , return_tensors=self.framework )
A_ = self.image_processor(UpperCAmelCase , return_tensors=self.framework )
yield {
"is_last": i == len(UpperCAmelCase ) - 1,
"target_size": target_size,
"candidate_label": candidate_label,
**text_inputs,
**image_features,
}
def __A ( self : str , UpperCAmelCase : int ):
A_ = model_inputs.pop("target_size" )
A_ = model_inputs.pop("candidate_label" )
A_ = model_inputs.pop("is_last" )
A_ = self.model(**UpperCAmelCase )
A_ = {"target_size": target_size, "candidate_label": candidate_label, "is_last": is_last, **outputs}
return model_outputs
def __A ( self : Dict , UpperCAmelCase : Any , UpperCAmelCase : Optional[int]=0.1 , UpperCAmelCase : Optional[int]=None ):
A_ = []
for model_output in model_outputs:
A_ = model_output["candidate_label"]
A_ = BaseModelOutput(UpperCAmelCase )
A_ = self.image_processor.post_process_object_detection(
outputs=UpperCAmelCase , threshold=UpperCAmelCase , target_sizes=model_output["target_size"] )[0]
for index in outputs["scores"].nonzero():
A_ = outputs["scores"][index].item()
A_ = self._get_bounding_box(outputs["boxes"][index][0] )
A_ = {"score": score, "label": label, "box": box}
results.append(UpperCAmelCase )
A_ = sorted(UpperCAmelCase , key=lambda UpperCAmelCase : x["score"] , reverse=UpperCAmelCase )
if top_k:
A_ = results[:top_k]
return results
def __A ( self : List[str] , UpperCAmelCase : "torch.Tensor" ):
if self.framework != "pt":
raise ValueError("The ZeroShotObjectDetectionPipeline is only available in PyTorch." )
A_ , A_ , A_ , A_ = box.int().tolist()
A_ = {
"xmin": xmin,
"ymin": ymin,
"xmax": xmax,
"ymax": ymax,
}
return bbox | 86 | 1 |
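# --- Hedged usage sketch for the pipeline above. "zero-shot-object-detection"
# is the registered task name in recent transformers releases and OWL-ViT is
# one compatible checkpoint; running this downloads model weights.
from transformers import pipeline

detector = pipeline("zero-shot-object-detection", model="google/owlvit-base-patch32")
predictions = detector(
    "http://images.cocodataset.org/val2017/000000039769.jpg",
    candidate_labels=["cat", "remote control"],
)
for pred in predictions:
    print(pred["score"], pred["label"], pred["box"])  # box is an xmin/ymin/xmax/ymax dict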
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
__a :List[Any] = {
'configuration_tapas': ['TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TapasConfig'],
'tokenization_tapas': ['TapasTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a :Any = [
'TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST',
'TapasForMaskedLM',
'TapasForQuestionAnswering',
'TapasForSequenceClassification',
'TapasModel',
'TapasPreTrainedModel',
'load_tf_weights_in_tapas',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a :Dict = [
'TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFTapasForMaskedLM',
'TFTapasForQuestionAnswering',
'TFTapasForSequenceClassification',
'TFTapasModel',
'TFTapasPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig
from .tokenization_tapas import TapasTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tapas import (
TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasPreTrainedModel,
load_tf_weights_in_tapas,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_tapas import (
TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTapasForMaskedLM,
TFTapasForQuestionAnswering,
TFTapasForSequenceClassification,
TFTapasModel,
TFTapasPreTrainedModel,
)
else:
import sys
__a :str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 86 |
import argparse
import torch
# Step 1. clone https://github.com/microsoft/unilm
# Step 2. git checkout commit b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd (https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd)
# Step 3. cd unilm
# Step 4. ln -s $(realpath wavlm/modules.py) ./  # create symlink
# import classes
from unilm.wavlm.WavLM import WavLM as WavLMOrig
from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig
from transformers import WavLMConfig, WavLMModel, logging
logging.set_verbosity_info()
__a :Any = logging.get_logger(__name__)
__a :int = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn.grep_linear': 'encoder.layers.*.attention.gru_rel_pos_linear',
'self_attn.relative_attention_bias': 'encoder.layers.*.attention.rel_attn_embed',
'self_attn.grep_a': 'encoder.layers.*.attention.gru_rel_pos_const',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'ctc_proj',
'mask_emb': 'masked_spec_embed',
}
__a :Tuple = [
'ctc_proj',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def __snake_case ( __UpperCamelCase : List[str] ,__UpperCamelCase : List[Any] ,__UpperCamelCase : Dict ,__UpperCamelCase : Any ,__UpperCamelCase : Optional[int] ):
"""simple docstring"""
for attribute in key.split("." ):
A_ = getattr(__UpperCamelCase ,__UpperCamelCase )
if weight_type is not None:
A_ = getattr(__UpperCamelCase ,__UpperCamelCase ).shape
else:
A_ = hf_pointer.shape
assert hf_shape == value.shape, (
f'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
f''' {value.shape} for {full_name}'''
)
if weight_type == "weight":
A_ = value
elif weight_type == "weight_g":
A_ = value
elif weight_type == "weight_v":
A_ = value
elif weight_type == "bias":
A_ = value
else:
A_ = value
logger.info(f'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''' )
def __snake_case ( __UpperCamelCase : Optional[int] ,__UpperCamelCase : Optional[Any] ):
"""simple docstring"""
A_ = []
A_ = fairseq_model.state_dict()
A_ = hf_model.feature_extractor
for name, value in fairseq_dict.items():
A_ = False
if "conv_layers" in name:
load_conv_layer(
__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,hf_model.config.feat_extract_norm == "group" ,)
A_ = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
A_ = True
if "*" in mapped_key:
A_ = name.split(__UpperCamelCase )[0].split("." )[-2]
A_ = mapped_key.replace("*" ,__UpperCamelCase )
if "weight_g" in name:
A_ = "weight_g"
elif "weight_v" in name:
A_ = "weight_v"
elif "bias" in name and "relative_attention_bias" not in name:
A_ = "bias"
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
A_ = "weight"
else:
A_ = None
set_recursively(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase )
continue
if not is_used:
unused_weights.append(__UpperCamelCase )
logger.warning(f'''Unused weights: {unused_weights}''' )
def __snake_case ( __UpperCamelCase : Optional[int] ,__UpperCamelCase : Dict ,__UpperCamelCase : List[str] ,__UpperCamelCase : Union[str, Any] ,__UpperCamelCase : Optional[int] ):
"""simple docstring"""
A_ = full_name.split("conv_layers." )[-1]
A_ = name.split("." )
A_ = int(items[0] )
A_ = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
)
A_ = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
)
A_ = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f'''{full_name} has size {value.shape}, but'''
                f''' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.'''
            )
            A_ = value
            logger.info(f'''Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f'''{full_name} has size {value.shape}, but'''
                f''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.'''
)
A_ = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(__UpperCamelCase )
@torch.no_grad()
def __snake_case ( __UpperCamelCase : Dict ,__UpperCamelCase : str ,__UpperCamelCase : int=None ):
"""simple docstring"""
A_ = torch.load(__UpperCamelCase )
A_ = WavLMConfigOrig(checkpoint["cfg"] )
A_ = WavLMOrig(__UpperCamelCase )
model.load_state_dict(checkpoint["model"] )
model.eval()
if config_path is not None:
A_ = WavLMConfig.from_pretrained(__UpperCamelCase )
else:
A_ = WavLMConfig()
A_ = WavLMModel(__UpperCamelCase )
recursively_load_weights(__UpperCamelCase ,__UpperCamelCase )
hf_wavlm.save_pretrained(__UpperCamelCase )
if __name__ == "__main__":
__a :List[Any] = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
__a :Optional[int] = parser.parse_args()
convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path) | 86 | 1 |
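# --- Small sketch of the dotted-path navigation used by set_recursively
# above: walk getattr segment by segment, then assign to the final
# attribute's .data after a shape check. `set_by_path` and the toy module are
# illustrative names for this sketch.
import torch
from torch import nn

def set_by_path(model, key, value, weight_type="weight"):
    pointer = model
    for attribute in key.split("."):
        pointer = getattr(pointer, attribute)
    target = getattr(pointer, weight_type)
    assert target.shape == value.shape
    target.data = value

net = nn.Sequential(nn.Linear(2, 3))
new_weight = torch.zeros(3, 2)
set_by_path(net, "0", new_weight)  # nn.Module resolves "0" via its submodule dict
assert torch.equal(net[0].weight.data, new_weight)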
from collections import deque
from .hash_table import HashTable
class _a ( snake_case_ ):
"""simple docstring"""
def __init__( self : List[str] , *UpperCAmelCase : Dict , **UpperCAmelCase : Optional[int] ):
super().__init__(*UpperCAmelCase , **UpperCAmelCase )
def __A ( self : Any , UpperCAmelCase : Any , UpperCAmelCase : Dict ):
A_ = deque([] ) if self.values[key] is None else self.values[key]
self.values[key].appendleft(UpperCAmelCase )
A_ = self.values[key]
def __A ( self : List[Any] ):
return (
sum(self.charge_factor - len(UpperCAmelCase ) for slot in self.values )
/ self.size_table
* self.charge_factor
)
def __A ( self : Dict , UpperCAmelCase : Any , UpperCAmelCase : int=None ):
if not (
len(self.values[key] ) == self.charge_factor and self.values.count(UpperCAmelCase ) == 0
):
return key
return super()._collision_resolution(UpperCAmelCase , UpperCAmelCase ) | 86 |
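# --- Generic sketch of the bucket strategy above (assumption: the parent
# HashTable keeps one value per slot; this variant keeps a deque per slot so
# colliding keys chain onto the same bucket).
from collections import deque

class ChainedTable:
    def __init__(self, size):
        self.values = [None] * size

    def insert(self, key, data):
        idx = key % len(self.values)
        bucket = deque() if self.values[idx] is None else self.values[idx]
        bucket.appendleft(data)
        self.values[idx] = bucket

table = ChainedTable(4)
table.insert(1, "a")
table.insert(5, "b")  # 5 % 4 == 1 -> same bucket as key 1
assert list(table.values[1]) == ["b", "a"]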
def __snake_case ( __UpperCamelCase : list ,__UpperCamelCase : int = 0 ):
"""simple docstring"""
A_ = length or len(__UpperCamelCase )
A_ = False
for i in range(length - 1 ):
if list_data[i] > list_data[i + 1]:
A_ , A_ = list_data[i + 1], list_data[i]
A_ = True
return list_data if not swapped else bubble_sort(__UpperCamelCase ,length - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod() | 86 | 1 |
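# --- A cleaned-up, hedged restatement of the recursive bubble sort above,
# plus quick checks (the original names are mangled in this snippet): each
# pass bubbles the maximum into place, recursing with a shorter length until
# a full pass makes no swaps.
def bubble_sort(list_data, length=0):
    length = length or len(list_data)
    swapped = False
    for i in range(length - 1):
        if list_data[i] > list_data[i + 1]:
            list_data[i], list_data[i + 1] = list_data[i + 1], list_data[i]
            swapped = True
    return list_data if not swapped else bubble_sort(list_data, length - 1)

assert bubble_sort([3, 1, 2]) == [1, 2, 3]
assert bubble_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert bubble_sort([]) == []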
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
__a :int = '\\n\n'
__a :Any = '\nPerplexity (PPL) is one of the most common metrics for evaluating language models.\nIt is defined as the exponentiated average negative log-likelihood of a sequence.\n\nFor more information, see https://huggingface.co/docs/transformers/perplexity\n'
__a :List[str] = '\nArgs:\n model_id (str): model used for calculating Perplexity\n NOTE: Perplexity can only be calculated for causal language models.\n This includes models such as gpt2, causal variations of bert,\n causal versions of t5, and more (the full list can be found\n in the AutoModelForCausalLM documentation here:\n https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )\n\n input_texts (list of str): input text, each separate text snippet\n is one list entry.\n batch_size (int): the batch size to run texts through the model. Defaults to 16.\n add_start_token (bool): whether to add the start token to the texts,\n so the perplexity can include the probability of the first word. Defaults to True.\n device (str): device to run on, defaults to \'cuda\' when available\nReturns:\n perplexity: dictionary containing the perplexity scores for the texts\n in the input list, as well as the mean perplexity. If one of the input texts is\n longer than the max input length of the model, then it is truncated to the\n max length for the perplexity computation.\nExamples:\n Example 1:\n >>> perplexity = datasets.load_metric("perplexity")\n >>> input_texts = ["lorem ipsum", "Happy Birthday!", "Bienvenue"]\n >>> results = perplexity.compute(model_id=\'gpt2\',\n ... add_start_token=False,\n ... input_texts=input_texts) # doctest:+ELLIPSIS\n >>> print(list(results.keys()))\n [\'perplexities\', \'mean_perplexity\']\n >>> print(round(results["mean_perplexity"], 2))\n 78.22\n >>> print(round(results["perplexities"][0], 2))\n 11.11\n\n Example 2:\n >>> perplexity = datasets.load_metric("perplexity")\n >>> input_texts = datasets.load_dataset("wikitext",\n ... "wikitext-2-raw-v1",\n ... split="test")["text"][:50] # doctest:+ELLIPSIS\n [...]\n >>> input_texts = [s for s in input_texts if s!=\'\']\n >>> results = perplexity.compute(model_id=\'gpt2\',\n ... input_texts=input_texts) # doctest:+ELLIPSIS\n >>> print(list(results.keys()))\n [\'perplexities\', \'mean_perplexity\']\n >>> print(round(results["mean_perplexity"], 2))\n 60.35\n >>> print(round(results["perplexities"][0], 2))\n 81.12\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _a ( datasets.Metric ):
"""simple docstring"""
def __A ( self : int ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"input_texts": datasets.Value("string" ),
} ) , reference_urls=["https://huggingface.co/docs/transformers/perplexity"] , )
def __A ( self : str , UpperCAmelCase : List[Any] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : int = 16 , UpperCAmelCase : bool = True , UpperCAmelCase : List[Any]=None ):
if device is not None:
assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu."
if device == "gpu":
A_ = "cuda"
else:
A_ = "cuda" if torch.cuda.is_available() else "cpu"
A_ = AutoModelForCausalLM.from_pretrained(UpperCAmelCase )
A_ = model.to(UpperCAmelCase )
A_ = AutoTokenizer.from_pretrained(UpperCAmelCase )
# if batch_size > 1 (which generally leads to padding being required), and
# if there is not an already assigned pad_token, assign an existing
# special token to also be the padding token
if tokenizer.pad_token is None and batch_size > 1:
A_ = list(tokenizer.special_tokens_map_extended.values() )
# check that the model already has at least one special token defined
assert (
len(UpperCAmelCase ) > 0
), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
# assign one of the special tokens to also be the pad token
tokenizer.add_special_tokens({"pad_token": existing_special_tokens[0]} )
if add_start_token:
# leave room for <BOS> token to be added:
assert (
tokenizer.bos_token is not None
), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
A_ = model.config.max_length - 1
else:
A_ = model.config.max_length
A_ = tokenizer(
UpperCAmelCase , add_special_tokens=UpperCAmelCase , padding=UpperCAmelCase , truncation=UpperCAmelCase , max_length=UpperCAmelCase , return_tensors="pt" , return_attention_mask=UpperCAmelCase , ).to(UpperCAmelCase )
A_ = encodings["input_ids"]
A_ = encodings["attention_mask"]
# check that each input is long enough:
if add_start_token:
assert torch.all(torch.ge(attn_masks.sum(1 ) , 1 ) ), "Each input text must be at least one token long."
else:
assert torch.all(
torch.ge(attn_masks.sum(1 ) , 2 ) ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."
A_ = []
A_ = CrossEntropyLoss(reduction="none" )
for start_index in logging.tqdm(range(0 , len(UpperCAmelCase ) , UpperCAmelCase ) ):
A_ = min(start_index + batch_size , len(UpperCAmelCase ) )
A_ = encoded_texts[start_index:end_index]
A_ = attn_masks[start_index:end_index]
if add_start_token:
A_ = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0 ) ).to(UpperCAmelCase )
A_ = torch.cat([bos_tokens_tensor, encoded_batch] , dim=1 )
A_ = torch.cat(
[torch.ones(bos_tokens_tensor.size() , dtype=torch.intaa ).to(UpperCAmelCase ), attn_mask] , dim=1 )
A_ = encoded_batch
with torch.no_grad():
A_ = model(UpperCAmelCase , attention_mask=UpperCAmelCase ).logits
A_ = out_logits[..., :-1, :].contiguous()
A_ = labels[..., 1:].contiguous()
A_ = attn_mask[..., 1:].contiguous()
A_ = torch.expa(
(loss_fct(shift_logits.transpose(1 , 2 ) , UpperCAmelCase ) * shift_attention_mask_batch).sum(1 )
/ shift_attention_mask_batch.sum(1 ) )
ppls += perplexity_batch.tolist()
return {"perplexities": ppls, "mean_perplexity": np.mean(UpperCAmelCase )} | 86 |
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class _a ( unittest.TestCase ):
"""simple docstring"""
def __A ( self : List[str] ):
A_ = torch.nn.Linear(10 , 10 )
A_ = torch.optim.SGD(model.parameters() , 0.1 )
A_ = Accelerator()
A_ = accelerator.prepare(UpperCAmelCase )
try:
pickle.loads(pickle.dumps(UpperCAmelCase ) )
except Exception as e:
self.fail(f'''Accelerated optimizer pickling failed with {e}''' )
AcceleratorState._reset_state() | 86 | 1 |
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
__a :Any = logging.getLogger(__name__)
@dataclass(frozen=snake_case_ )
class _a :
"""simple docstring"""
_lowerCamelCase : str
_lowerCamelCase : str
_lowerCamelCase : Optional[str] = None
_lowerCamelCase : Optional[str] = None
_lowerCamelCase : Optional[str] = None
@dataclass(frozen=snake_case_ )
class _a :
"""simple docstring"""
_lowerCamelCase : List[int]
_lowerCamelCase : Optional[List[int]] = None
_lowerCamelCase : Optional[List[int]] = None
_lowerCamelCase : Optional[Union[int, float]] = None
_lowerCamelCase : Optional[int] = None
if is_torch_available():
import torch
from torch.utils.data import Dataset
class _a ( snake_case_ ):
"""simple docstring"""
_lowerCamelCase : List[InputFeatures]
def __init__( self : str , UpperCAmelCase : str , UpperCAmelCase : PreTrainedTokenizer , UpperCAmelCase : str , UpperCAmelCase : Optional[int] = None , UpperCAmelCase : str=False , UpperCAmelCase : bool = False , ):
A_ = hans_processors[task]()
A_ = os.path.join(
UpperCAmelCase , "cached_{}_{}_{}_{}".format(
"dev" if evaluate else "train" , tokenizer.__class__.__name__ , str(UpperCAmelCase ) , UpperCAmelCase , ) , )
A_ = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
A_ , A_ = label_list[2], label_list[1]
A_ = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
A_ = cached_features_file + ".lock"
with FileLock(UpperCAmelCase ):
if os.path.exists(UpperCAmelCase ) and not overwrite_cache:
logger.info(f'''Loading features from cached file {cached_features_file}''' )
A_ = torch.load(UpperCAmelCase )
else:
logger.info(f'''Creating features from dataset file at {data_dir}''' )
A_ = (
processor.get_dev_examples(UpperCAmelCase ) if evaluate else processor.get_train_examples(UpperCAmelCase )
)
logger.info("Training examples: %s" , len(UpperCAmelCase ) )
A_ = hans_convert_examples_to_features(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
logger.info("Saving features into cached file %s" , UpperCAmelCase )
torch.save(self.features , UpperCAmelCase )
def __len__( self : Tuple ):
return len(self.features )
def __getitem__( self : int , UpperCAmelCase : Optional[Any] ):
return self.features[i]
def __A ( self : int ):
return self.label_list
if is_tf_available():
import tensorflow as tf
class _a :
"""simple docstring"""
_lowerCamelCase : List[InputFeatures]
def __init__( self : List[str] , UpperCAmelCase : str , UpperCAmelCase : PreTrainedTokenizer , UpperCAmelCase : str , UpperCAmelCase : Optional[int] = 128 , UpperCAmelCase : int=False , UpperCAmelCase : bool = False , ):
A_ = hans_processors[task]()
A_ = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
A_ , A_ = label_list[2], label_list[1]
A_ = label_list
A_ = processor.get_dev_examples(UpperCAmelCase ) if evaluate else processor.get_train_examples(UpperCAmelCase )
A_ = hans_convert_examples_to_features(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
def gen():
for ex_index, ex in tqdm.tqdm(enumerate(self.features ) , desc="convert examples to features" ):
if ex_index % 10000 == 0:
logger.info("Writing example %d of %d" % (ex_index, len(UpperCAmelCase )) )
yield (
{
"example_id": 0,
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label,
)
A_ = tf.data.Dataset.from_generator(
UpperCAmelCase , (
{
"example_id": tf.intaa,
"input_ids": tf.intaa,
"attention_mask": tf.intaa,
"token_type_ids": tf.intaa,
},
tf.intaa,
) , (
{
"example_id": tf.TensorShape([] ),
"input_ids": tf.TensorShape([None, None] ),
"attention_mask": tf.TensorShape([None, None] ),
"token_type_ids": tf.TensorShape([None, None] ),
},
tf.TensorShape([] ),
) , )
def __A ( self : List[Any] ):
return self.dataset
def __len__( self : List[Any] ):
return len(self.features )
def __getitem__( self : Any , UpperCAmelCase : str ):
return self.features[i]
def __A ( self : str ):
return self.label_list
class _a ( snake_case_ ):
"""simple docstring"""
def __A ( self : Union[str, Any] , UpperCAmelCase : Optional[int] ):
return self._create_examples(self._read_tsv(os.path.join(UpperCAmelCase , "heuristics_train_set.txt" ) ) , "train" )
def __A ( self : Optional[int] , UpperCAmelCase : Optional[Any] ):
return self._create_examples(self._read_tsv(os.path.join(UpperCAmelCase , "heuristics_evaluation_set.txt" ) ) , "dev" )
def __A ( self : Union[str, Any] ):
return ["contradiction", "entailment", "neutral"]
def __A ( self : Optional[int] , UpperCAmelCase : int , UpperCAmelCase : str ):
A_ = []
for i, line in enumerate(UpperCAmelCase ):
if i == 0:
continue
A_ = "%s-%s" % (set_type, line[0])
A_ = line[5]
A_ = line[6]
A_ = line[7][2:] if line[7].startswith("ex" ) else line[7]
A_ = line[0]
examples.append(InputExample(guid=UpperCAmelCase , text_a=UpperCAmelCase , text_b=UpperCAmelCase , label=UpperCAmelCase , pairID=UpperCAmelCase ) )
return examples
def __snake_case ( __UpperCamelCase : List[InputExample] ,__UpperCamelCase : List[str] ,__UpperCamelCase : int ,__UpperCamelCase : PreTrainedTokenizer ,):
"""simple docstring"""
A_ = {label: i for i, label in enumerate(__UpperCamelCase )}
A_ = []
for ex_index, example in tqdm.tqdm(enumerate(__UpperCamelCase ) ,desc="convert examples to features" ):
if ex_index % 1_0000 == 0:
logger.info("Writing example %d" % (ex_index) )
A_ = tokenizer(
example.text_a ,example.text_b ,add_special_tokens=__UpperCamelCase ,max_length=__UpperCamelCase ,padding="max_length" ,truncation=__UpperCamelCase ,return_overflowing_tokens=__UpperCamelCase ,)
A_ = label_map[example.label] if example.label in label_map else 0
A_ = int(example.pairID )
features.append(InputFeatures(**__UpperCamelCase ,label=__UpperCamelCase ,pairID=__UpperCamelCase ) )
for i, example in enumerate(examples[:5] ):
logger.info("*** Example ***" )
logger.info(f'''guid: {example}''' )
logger.info(f'''features: {features[i]}''' )
return features
__a :Union[str, Any] = {
'hans': 3,
}
__a :Any = {
'hans': HansProcessor,
} | 86 |
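# --- Minimal sketch of the FileLock-guarded caching used in the dataset class
# above: the first process computes and saves the features; later processes
# find the cache and just load it. Toy payload instead of real features.
import os
import tempfile

import torch
from filelock import FileLock

cache_file = os.path.join(tempfile.mkdtemp(), "cached_features.pt")

with FileLock(cache_file + ".lock"):
    if os.path.exists(cache_file):
        features = torch.load(cache_file)
    else:
        features = [1, 2, 3]  # expensive preprocessing would go here
        torch.save(features, cache_file)

assert features == [1, 2, 3]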
import argparse
import json
import os
import fairseq
import torch
from torch import nn
from transformers import (
SpeechaTextaConfig,
SpeechaTextaForCausalLM,
SpeechaTextaTokenizer,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
__a :List[str] = logging.get_logger(__name__)
__a :Optional[int] = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
}
__a :Any = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def __snake_case ( __UpperCamelCase : Dict ,__UpperCamelCase : Optional[Any] ,__UpperCamelCase : Optional[int] ,__UpperCamelCase : List[str] ,__UpperCamelCase : Union[str, Any] ):
"""simple docstring"""
for attribute in key.split("." ):
A_ = getattr(__UpperCamelCase ,__UpperCamelCase )
if weight_type is not None:
A_ = getattr(__UpperCamelCase ,__UpperCamelCase ).shape
else:
A_ = hf_pointer.shape
assert hf_shape == value.shape, (
f'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
f''' {value.shape} for {full_name}'''
)
if weight_type == "weight":
A_ = value
elif weight_type == "weight_g":
A_ = value
elif weight_type == "weight_v":
A_ = value
elif weight_type == "bias":
A_ = value
else:
A_ = value
logger.info(f'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''' )
def __snake_case ( __UpperCamelCase : Union[str, Any] ,__UpperCamelCase : Dict ):
"""simple docstring"""
A_ = []
A_ = fairseq_model.state_dict()
A_ = hf_model.feature_extractor
# if encoder has different dim to decoder -> use proj_weight
A_ = None
for name, value in fairseq_dict.items():
A_ = False
if "conv_layers" in name:
load_conv_layer(
__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,hf_model.config.feat_extract_norm == "group" ,)
A_ = True
elif name.split("." )[0] == "proj":
A_ = fairseq_model.proj
A_ = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
A_ = True
if "*" in mapped_key:
A_ = name.split(__UpperCamelCase )[0].split("." )[-2]
A_ = mapped_key.replace("*" ,__UpperCamelCase )
if "weight_g" in name:
A_ = "weight_g"
elif "weight_v" in name:
A_ = "weight_v"
elif "bias" in name:
A_ = "bias"
elif "weight" in name:
A_ = "weight"
else:
A_ = None
set_recursively(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase )
continue
if not is_used:
unused_weights.append(__UpperCamelCase )
logger.warning(f'''Unused weights: {unused_weights}''' )
return proj_weight
def __snake_case ( __UpperCamelCase : str ,__UpperCamelCase : Optional[Any] ,__UpperCamelCase : int ,__UpperCamelCase : int ,__UpperCamelCase : Any ):
"""simple docstring"""
A_ = full_name.split("conv_layers." )[-1]
A_ = name.split("." )
A_ = int(items[0] )
A_ = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
)
A_ = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
)
A_ = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f'''{full_name} has size {value.shape}, but'''
                f''' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.'''
            )
            A_ = value
            logger.info(f'''Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f'''{full_name} has size {value.shape}, but'''
                f''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.'''
)
A_ = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(__UpperCamelCase )
def __snake_case ( __UpperCamelCase : Optional[Any] ):
"""simple docstring"""
A_ , A_ = emb.weight.shape
A_ = nn.Linear(__UpperCamelCase ,__UpperCamelCase ,bias=__UpperCamelCase )
A_ = emb.weight.data
return lin_layer
def __snake_case ( __UpperCamelCase : Tuple ):
"""simple docstring"""
with open(__UpperCamelCase ,"r" ,encoding="utf-8" ) as f:
A_ = f.readlines()
A_ = [line.split(" " )[0] for line in lines]
A_ = len(__UpperCamelCase )
A_ = {
"<s>": 0,
"<pad>": 1,
"</s>": 2,
"<unk>": 3,
}
vocab_dict.update(dict(zip(__UpperCamelCase ,range(4 ,num_words + 4 ) ) ) )
return vocab_dict
@torch.no_grad()
def __snake_case ( __UpperCamelCase : List[str] ,__UpperCamelCase : Any ,__UpperCamelCase : List[Any] ,__UpperCamelCase : Union[str, Any] ,__UpperCamelCase : List[str] ,__UpperCamelCase : Optional[Any] ,__UpperCamelCase : Dict ,):
"""simple docstring"""
A_ = WavaVecaConfig.from_pretrained(__UpperCamelCase )
A_ = SpeechaTextaConfig.from_pretrained(
__UpperCamelCase ,vocab_size=__UpperCamelCase ,decoder_layers=__UpperCamelCase ,do_stable_layer_norm=__UpperCamelCase )
A_ = WavaVecaFeatureExtractor(
feature_size=1 ,sampling_rate=1_6000 ,padding_value=0 ,do_normalize=__UpperCamelCase ,return_attention_mask=__UpperCamelCase ,)
A_ , A_ , A_ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] ,arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} )
A_ = model[0].eval()
# set weights for wav2vec2 encoder
A_ = WavaVecaModel(__UpperCamelCase )
A_ = recursively_load_weights_wavaveca(model.encoder ,__UpperCamelCase )
A_ = SpeechaTextaForCausalLM(__UpperCamelCase )
A_ , A_ = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() ,strict=__UpperCamelCase )
# set output linear layer
unexpected_keys.remove("embed_out" )
A_ = nn.Parameter(model.decoder.embed_out.detach() )
# layer norm is init to identity matrix so leaving it is fine
logger.warning(f'''The following keys are missing when loading the decoder weights: {missing_keys}''' )
logger.warning(f'''The following keys are unexpected when loading the decoder weights: {unexpected_keys}''' )
A_ = SpeechEncoderDecoderModel(encoder=__UpperCamelCase ,decoder=__UpperCamelCase )
A_ = False
# add projection layer
A_ = nn.Parameter(projection_layer.weight )
A_ = nn.Parameter(projection_layer.bias )
A_ = create_vocab_dict(__UpperCamelCase )
with open(os.path.join(__UpperCamelCase ,"vocab.json" ) ,"w" ) as fp:
json.dump(__UpperCamelCase ,__UpperCamelCase )
A_ = SpeechaTextaTokenizer(os.path.join(__UpperCamelCase ,"vocab.json" ) )
tokenizer.save_pretrained(__UpperCamelCase )
A_ = hf_wavavec.config.to_dict()
A_ = tokenizer.pad_token_id
A_ = tokenizer.bos_token_id
A_ = tokenizer.eos_token_id
A_ = "speech_to_text_2"
A_ = "wav2vec2"
A_ = SpeechEncoderDecoderConfig.from_dict(__UpperCamelCase )
hf_wavavec.save_pretrained(__UpperCamelCase )
feature_extractor.save_pretrained(__UpperCamelCase )
if __name__ == "__main__":
__a :int = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument(
'--encoder_config_path',
default='facebook/wav2vec2-large-lv60',
type=str,
help='Path to hf encoder wav2vec2 checkpoint config',
)
parser.add_argument(
'--decoder_config_path',
default='facebook/s2t-small-mustc-en-fr-st',
type=str,
help='Path to hf decoder s2t checkpoint config',
)
parser.add_argument('--vocab_size', default=1_0224, type=int, help='Vocab size of decoder')
parser.add_argument('--num_decoder_layers', default=7, type=int, help='Number of decoder layers')
__a :Tuple = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
vocab_size=args.vocab_size,
num_decoder_layers=args.num_decoder_layers,
) | 86 | 1 |
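# --- Hedged sketch of create_vocab_dict above: a fairseq dict.txt lists one
# "<token> <count>" pair per line; the converter prepends the four special
# tokens and numbers the remaining words from 4 upward.
lines = ["hello 120", "world 57", "! 3"]
words = [line.split(" ")[0] for line in lines]
vocab = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
vocab.update({word: idx for idx, word in enumerate(words, start=4)})
assert vocab["hello"] == 4 and vocab["!"] == 6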
from queue import PriorityQueue
from typing import Any
import numpy as np
def __snake_case ( __UpperCamelCase : dict ,__UpperCamelCase : str ,__UpperCamelCase : set ,__UpperCamelCase : set ,__UpperCamelCase : dict ,__UpperCamelCase : dict ,__UpperCamelCase : PriorityQueue ,__UpperCamelCase : dict ,__UpperCamelCase : float | int ,):
"""simple docstring"""
for nxt, d in graph[v]:
if nxt in visited_forward:
continue
A_ = cst_fwd.get(__UpperCamelCase ,np.inf )
A_ = cst_fwd[v] + d
if new_cost_f < old_cost_f:
queue.put((new_cost_f, nxt) )
A_ = new_cost_f
A_ = v
if nxt in visited_backward:
if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
A_ = cst_fwd[v] + d + cst_bwd[nxt]
return shortest_distance
def __snake_case ( __UpperCamelCase : str ,__UpperCamelCase : str ,__UpperCamelCase : dict ,__UpperCamelCase : dict ):
"""simple docstring"""
A_ = -1
A_ = set()
A_ = set()
A_ = {source: 0}
A_ = {destination: 0}
A_ = {source: None}
A_ = {destination: None}
A_ = PriorityQueue()
A_ = PriorityQueue()
A_ = np.inf
queue_forward.put((0, source) )
queue_backward.put((0, destination) )
if source == destination:
return 0
while not queue_forward.empty() and not queue_backward.empty():
A_ , A_ = queue_forward.get()
visited_forward.add(__UpperCamelCase )
A_ , A_ = queue_backward.get()
visited_backward.add(__UpperCamelCase )
A_ = pass_and_relaxation(
__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,)
A_ = pass_and_relaxation(
__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,)
if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
break
if shortest_distance != np.inf:
A_ = shortest_distance
return shortest_path_distance
__a :List[str] = {
'B': [['C', 1]],
'C': [['D', 1]],
'D': [['F', 1]],
'E': [['B', 1], ['G', 2]],
'F': [],
'G': [['F', 1]],
}
__a :Union[str, Any] = {
'B': [['E', 1]],
'C': [['B', 1]],
'D': [['C', 1]],
'F': [['D', 1], ['G', 1]],
'E': [[None, np.inf]],
'G': [['E', 2]],
}
if __name__ == "__main__":
import doctest
doctest.testmod() | 86 |
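# A self-contained sketch of the bidirectional search above, with readable
# names. It alternates one forward and one backward expansion, records a
# candidate distance whenever the two frontiers touch, and stops once the two
# popped distances can no longer improve the best candidate. The adjacency
# format matches the forward/backward graphs defined above.
from queue import PriorityQueue

import numpy as np


def bidirectional_dijkstra(source, destination, graph_fwd, graph_bwd):
    if source == destination:
        return 0
    dist = ({source: 0}, {destination: 0})
    seen = (set(), set())
    queues = (PriorityQueue(), PriorityQueue())
    queues[0].put((0, source))
    queues[1].put((0, destination))
    best = np.inf
    while not queues[0].empty() and not queues[1].empty():
        tops = []
        for side, graph in enumerate((graph_fwd, graph_bwd)):
            _, u = queues[side].get()
            seen[side].add(u)
            tops.append(u)
            for nxt, d in graph.get(u, []):
                if nxt is None or nxt in seen[side]:
                    continue
                new_cost = dist[side][u] + d
                if new_cost < dist[side].get(nxt, np.inf):
                    dist[side][nxt] = new_cost
                    queues[side].put((new_cost, nxt))
                if nxt in seen[1 - side]:  # frontiers met: record a candidate path
                    best = min(best, dist[side][u] + d + dist[1 - side][nxt])
        if dist[0][tops[0]] + dist[1][tops[1]] >= best:
            break
    return -1 if best == np.inf else best


graph_fwd = {
    "B": [["C", 1]], "C": [["D", 1]], "D": [["F", 1]],
    "E": [["B", 1], ["G", 2]], "F": [], "G": [["F", 1]],
}
graph_bwd = {
    "B": [["E", 1]], "C": [["B", 1]], "D": [["C", 1]],
    "F": [["D", 1], ["G", 1]], "E": [[None, np.inf]], "G": [["E", 2]],
}
print(bidirectional_dijkstra("E", "F", graph_fwd, graph_bwd))  # 3, via E -> G -> F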
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
__a :str = logging.get_logger(__name__)
__a :Any = Dict[str, Any]
__a :int = List[Prediction]
@add_end_docstrings(snake_case_ )
class _a ( snake_case_ ):
"""simple docstring"""
def __init__( self : Tuple , *UpperCAmelCase : Optional[int] , **UpperCAmelCase : Optional[Any] ):
super().__init__(*UpperCAmelCase , **UpperCAmelCase )
if self.framework == "tf":
raise ValueError(f'''The {self.__class__} is only available in PyTorch.''' )
requires_backends(self , "vision" )
self.check_model_type(
dict(MODEL_FOR_OBJECT_DETECTION_MAPPING.items() + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items() ) )
def __A ( self : str , **UpperCAmelCase : str ):
A_ = {}
if "threshold" in kwargs:
A_ = kwargs["threshold"]
return {}, {}, postprocess_kwargs
def __call__( self : Union[str, Any] , *UpperCAmelCase : Union[str, Any] , **UpperCAmelCase : Optional[Any] ):
return super().__call__(*UpperCAmelCase , **UpperCAmelCase )
def __A ( self : str , UpperCAmelCase : Any ):
A_ = load_image(UpperCAmelCase )
A_ = torch.IntTensor([[image.height, image.width]] )
A_ = self.image_processor(images=[image] , return_tensors="pt" )
if self.tokenizer is not None:
A_ = self.tokenizer(text=inputs["words"] , boxes=inputs["boxes"] , return_tensors="pt" )
A_ = target_size
return inputs
def __A ( self : Optional[Any] , UpperCAmelCase : Optional[int] ):
A_ = model_inputs.pop("target_size" )
A_ = self.model(**UpperCAmelCase )
A_ = outputs.__class__({"target_size": target_size, **outputs} )
if self.tokenizer is not None:
A_ = model_inputs["bbox"]
return model_outputs
def __A ( self : str , UpperCAmelCase : Optional[int] , UpperCAmelCase : Union[str, Any]=0.9 ):
A_ = model_outputs["target_size"]
if self.tokenizer is not None:
# This is a LayoutLMForTokenClassification variant.
# The OCR got the boxes and the model classified the words.
A_ , A_ = target_size[0].tolist()
def unnormalize(UpperCAmelCase : Any ):
return self._get_bounding_box(
torch.Tensor(
[
(width * bbox[0] / 1000),
(height * bbox[1] / 1000),
(width * bbox[2] / 1000),
(height * bbox[3] / 1000),
] ) )
A_ , A_ = model_outputs["logits"].squeeze(0 ).softmax(dim=-1 ).max(dim=-1 )
A_ = [self.model.config.idalabel[prediction] for prediction in classes.tolist()]
A_ = [unnormalize(UpperCAmelCase ) for bbox in model_outputs["bbox"].squeeze(0 )]
A_ = ["score", "label", "box"]
A_ = [dict(zip(UpperCAmelCase , UpperCAmelCase ) ) for vals in zip(scores.tolist() , UpperCAmelCase , UpperCAmelCase ) if vals[0] > threshold]
else:
# This is a regular ForObjectDetectionModel
A_ = self.image_processor.post_process_object_detection(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
A_ = raw_annotations[0]
A_ = raw_annotation["scores"]
A_ = raw_annotation["labels"]
A_ = raw_annotation["boxes"]
A_ = scores.tolist()
A_ = [self.model.config.idalabel[label.item()] for label in labels]
A_ = [self._get_bounding_box(UpperCAmelCase ) for box in boxes]
# {"scores": [...], ...} --> [{"score":x, ...}, ...]
A_ = ["score", "label", "box"]
A_ = [
dict(zip(UpperCAmelCase , UpperCAmelCase ) )
for vals in zip(raw_annotation["scores"] , raw_annotation["labels"] , raw_annotation["boxes"] )
]
return annotation
def __A ( self : Tuple , UpperCAmelCase : "torch.Tensor" ):
if self.framework != "pt":
raise ValueError("The ObjectDetectionPipeline is only available in PyTorch." )
A_ , A_ , A_ , A_ = box.int().tolist()
A_ = {
"xmin": xmin,
"ymin": ymin,
"xmax": xmax,
"ymax": ymax,
}
return bbox | 86 | 1 |
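# A hedged usage sketch for the pipeline above. The checkpoint name is an
# assumption; any object-detection model on the Hub works the same way, and
# the threshold kwarg is forwarded to postprocessing as shown above.
from transformers import pipeline

detector = pipeline("object-detection", model="facebook/detr-resnet-50")
preds = detector("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=0.9)
for pred in preds:  # each entry: {"score": float, "label": str, "box": {xmin, ymin, xmax, ymax}}
    print(pred["label"], round(pred["score"], 3), pred["box"])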
from typing import Tuple, Union
from ...modeling_outputs import BackboneOutput
from ...modeling_utils import PreTrainedModel
from ...utils import is_timm_available, is_torch_available, requires_backends
from ...utils.backbone_utils import BackboneMixin
from .configuration_timm_backbone import TimmBackboneConfig
if is_timm_available():
import timm
if is_torch_available():
from torch import Tensor
class _a ( snake_case_ , snake_case_ ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = 'pixel_values'
_lowerCamelCase : Optional[Any] = False
_lowerCamelCase : Union[str, Any] = TimmBackboneConfig
def __init__( self : Optional[int] , UpperCAmelCase : Optional[Any] , **UpperCAmelCase : Optional[Any] ):
requires_backends(self , "timm" )
super().__init__(UpperCAmelCase )
A_ = config
if config.backbone is None:
raise ValueError("backbone is not set in the config. Please set it to a timm model name." )
if config.backbone not in timm.list_models():
raise ValueError(f'''backbone {config.backbone} is not supported by timm.''' )
if hasattr(UpperCAmelCase , "out_features" ) and config.out_features is not None:
raise ValueError("out_features is not supported by TimmBackbone. Please use out_indices instead." )
A_ = getattr(UpperCAmelCase , "use_pretrained_backbone" , UpperCAmelCase )
if pretrained is None:
raise ValueError("use_pretrained_backbone is not set in the config. Please set it to True or False." )
# We just take the final layer by default. This matches the default for the transformers models.
A_ = config.out_indices if getattr(UpperCAmelCase , "out_indices" , UpperCAmelCase ) is not None else (-1,)
A_ = timm.create_model(
config.backbone , pretrained=UpperCAmelCase , features_only=config.features_only , in_chans=config.num_channels , out_indices=UpperCAmelCase , **UpperCAmelCase , )
# These are used to control the output of the model when called. If output_hidden_states is True, then
# return_layers is modified to include all layers.
A_ = self._backbone.return_layers
A_ = {layer["module"]: str(UpperCAmelCase ) for i, layer in enumerate(self._backbone.feature_info.info )}
super()._init_backbone(UpperCAmelCase )
@classmethod
def __A ( cls : Optional[int] , UpperCAmelCase : List[Any] , *UpperCAmelCase : Dict , **UpperCAmelCase : List[Any] ):
requires_backends(cls , ["vision", "timm"] )
from ...models.timm_backbone import TimmBackboneConfig
A_ = kwargs.pop("config" , TimmBackboneConfig() )
A_ = kwargs.pop("use_timm_backbone" , UpperCAmelCase )
if not use_timm:
raise ValueError("use_timm_backbone must be True for timm backbones" )
A_ = kwargs.pop("num_channels" , config.num_channels )
A_ = kwargs.pop("features_only" , config.features_only )
A_ = kwargs.pop("use_pretrained_backbone" , config.use_pretrained_backbone )
A_ = kwargs.pop("out_indices" , config.out_indices )
A_ = TimmBackboneConfig(
backbone=UpperCAmelCase , num_channels=UpperCAmelCase , features_only=UpperCAmelCase , use_pretrained_backbone=UpperCAmelCase , out_indices=UpperCAmelCase , )
return super()._from_config(UpperCAmelCase , **UpperCAmelCase )
def __A ( self : Tuple , UpperCAmelCase : Optional[int] ):
pass
def __A ( self : Dict , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Union[str, Any]=None , UpperCAmelCase : Optional[int]=None , UpperCAmelCase : Dict=None , **UpperCAmelCase : int ):
A_ = return_dict if return_dict is not None else self.config.use_return_dict
A_ = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
A_ = output_attentions if output_attentions is not None else self.config.output_attentions
if output_attentions:
raise ValueError("Cannot output attentions for timm backbones at the moment" )
if output_hidden_states:
# We modify the return layers to include all the stages of the backbone
A_ = self._all_layers
A_ = self._backbone(UpperCAmelCase , **UpperCAmelCase )
A_ = self._return_layers
A_ = tuple(hidden_states[i] for i in self.out_indices )
else:
A_ = self._backbone(UpperCAmelCase , **UpperCAmelCase )
A_ = None
A_ = tuple(UpperCAmelCase )
A_ = tuple(UpperCAmelCase ) if hidden_states is not None else None
if not return_dict:
A_ = (feature_maps,)
if output_hidden_states:
A_ = output + (hidden_states,)
return output
return BackboneOutput(feature_maps=UpperCAmelCase , hidden_states=UpperCAmelCase , attentions=UpperCAmelCase ) | 86 |
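# A minimal usage sketch for the wrapper above, built through its config; the
# "resnet18" backbone name is an assumption (any name in timm.list_models()
# works), and the printed shape is indicative only.
import torch
from transformers import TimmBackbone, TimmBackboneConfig

config = TimmBackboneConfig(backbone="resnet18", use_pretrained_backbone=False, out_indices=(-1,))
backbone = TimmBackbone(config)
features = backbone(torch.randn(1, 3, 224, 224))
print([tuple(fm.shape) for fm in features.feature_maps])  # e.g. [(1, 512, 7, 7)]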
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNetaDModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def __snake_case ( __UpperCamelCase : Dict ):
"""simple docstring"""
A_ , A_ = image.size
A_ , A_ = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32
A_ = image.resize((w, h) ,resample=PIL_INTERPOLATION["lanczos"] )
A_ = np.array(__UpperCamelCase ).astype(np.floataa ) / 255.0
A_ = image[None].transpose(0 ,3 ,1 ,2 )
A_ = torch.from_numpy(__UpperCamelCase )
return 2.0 * image - 1.0
class _a ( snake_case_ ):
"""simple docstring"""
def __init__( self : Union[str, Any] , UpperCAmelCase : VQModel , UpperCAmelCase : UNetaDModel , UpperCAmelCase : Union[
DDIMScheduler,
PNDMScheduler,
LMSDiscreteScheduler,
EulerDiscreteScheduler,
EulerAncestralDiscreteScheduler,
DPMSolverMultistepScheduler,
] , ):
super().__init__()
self.register_modules(vqvae=UpperCAmelCase , unet=UpperCAmelCase , scheduler=UpperCAmelCase )
@torch.no_grad()
def __call__( self : int , UpperCAmelCase : Union[torch.Tensor, PIL.Image.Image] = None , UpperCAmelCase : Optional[int] = 1 , UpperCAmelCase : Optional[int] = 100 , UpperCAmelCase : Optional[float] = 0.0 , UpperCAmelCase : Optional[Union[torch.Generator, List[torch.Generator]]] = None , UpperCAmelCase : Optional[str] = "pil" , UpperCAmelCase : bool = True , ):
if isinstance(UpperCAmelCase , PIL.Image.Image ):
A_ = 1
elif isinstance(UpperCAmelCase , torch.Tensor ):
A_ = image.shape[0]
else:
raise ValueError(f'''`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(UpperCAmelCase )}''' )
if isinstance(UpperCAmelCase , PIL.Image.Image ):
A_ = preprocess(UpperCAmelCase )
A_ , A_ = image.shape[-2:]
# in_channels should be 6: 3 for latents, 3 for low resolution image
A_ = (batch_size, self.unet.config.in_channels // 2, height, width)
A_ = next(self.unet.parameters() ).dtype
A_ = randn_tensor(UpperCAmelCase , generator=UpperCAmelCase , device=self.device , dtype=UpperCAmelCase )
A_ = image.to(device=self.device , dtype=UpperCAmelCase )
# set timesteps and move to the correct device
self.scheduler.set_timesteps(UpperCAmelCase , device=self.device )
A_ = self.scheduler.timesteps
# scale the initial noise by the standard deviation required by the scheduler
A_ = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
A_ = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
A_ = {}
if accepts_eta:
A_ = eta
for t in self.progress_bar(UpperCAmelCase ):
# concat latents and low resolution image in the channel dimension.
A_ = torch.cat([latents, image] , dim=1 )
A_ = self.scheduler.scale_model_input(UpperCAmelCase , UpperCAmelCase )
# predict the noise residual
A_ = self.unet(UpperCAmelCase , UpperCAmelCase ).sample
# compute the previous noisy sample x_t -> x_t-1
A_ = self.scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ).prev_sample
# decode the image latents with the VQVAE
A_ = self.vqvae.decode(UpperCAmelCase ).sample
A_ = torch.clamp(UpperCAmelCase , -1.0 , 1.0 )
A_ = image / 2 + 0.5
A_ = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
A_ = self.numpy_to_pil(UpperCAmelCase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=UpperCAmelCase ) | 86 | 1 |
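# A hedged usage sketch for the super-resolution pipeline above; the checkpoint
# name is the one commonly paired with this pipeline class, and the input file
# is a placeholder.
from PIL import Image
from diffusers import LDMSuperResolutionPipeline

pipe = LDMSuperResolutionPipeline.from_pretrained("CompVis/ldm-super-resolution-4x-openimages")
low_res = Image.open("low_res.png").convert("RGB").resize((128, 128))  # hypothetical input
upscaled = pipe(low_res, num_inference_steps=100, eta=1.0).images[0]
upscaled.save("upscaled.png")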
from typing import Any
import numpy as np
def __snake_case ( __UpperCamelCase : np.ndarray ):
"""simple docstring"""
return np.array_equal(__UpperCamelCase ,matrix.conjugate().T )
def __snake_case ( __UpperCamelCase : np.ndarray ,__UpperCamelCase : np.ndarray ):
"""simple docstring"""
A_ = v.conjugate().T
A_ = v_star.dot(__UpperCamelCase )
assert isinstance(__UpperCamelCase ,np.ndarray )
return (v_star_dot.dot(__UpperCamelCase )) / (v_star.dot(__UpperCamelCase ))
def __snake_case ( ):
"""simple docstring"""
A_ = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]] )
A_ = np.array([[1], [2], [3]] )
assert is_hermitian(__UpperCamelCase ), f'''{a} is not hermitian.'''
print(rayleigh_quotient(__UpperCamelCase ,__UpperCamelCase ) )
A_ = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]] )
assert is_hermitian(__UpperCamelCase ), f'''{a} is not hermitian.'''
assert rayleigh_quotient(__UpperCamelCase ,__UpperCamelCase ) == float(3 )
if __name__ == "__main__":
import doctest
doctest.testmod()
tests() | 86 |
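# A quick numerical check of the bound behind the Rayleigh quotient: for a
# Hermitian matrix it always lies between the smallest and largest eigenvalues.
import numpy as np

a = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]])
v = np.array([[1], [2], [3]], dtype=complex)
quotient = ((v.conj().T @ a @ v) / (v.conj().T @ v)).item().real
bounds = np.linalg.eigvalsh(a)  # real eigenvalues of a Hermitian matrix, sorted ascending
assert bounds[0] - 1e-9 <= quotient <= bounds[-1] + 1e-9
print(quotient)  # 55/14 ~= 3.93 for this example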
__a :Optional[int] = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(10_0000)]
def __snake_case ( __UpperCamelCase : int ):
"""simple docstring"""
A_ = 0
while number:
# Increased Speed Slightly by checking every 5 digits together.
sum_of_digits_squared += DIGITS_SQUARED[number % 10_0000]
number //= 10_0000
return sum_of_digits_squared
# There are 2 chains made:
# one ends with 89, and declaring its member 58 first minimizes the number
# of iterations needed to check all the remaining members;
# the other ends with 1 and contains only the single element 1.
# So 58 and 1 are chosen to be declared at the start.
# Changed dictionary to an array to speed up the solution
__a :list[bool | None] = [None] * 1000_0000
__a :Optional[Any] = True
__a :List[Any] = False
def __snake_case ( __UpperCamelCase : int ):
"""simple docstring"""
if CHAINS[number - 1] is not None:
return CHAINS[number - 1] # type: ignore
A_ = chain(next_number(__UpperCamelCase ) )
A_ = number_chain
while number < 1000_0000:
A_ = number_chain
number *= 10
return number_chain
def __snake_case ( __UpperCamelCase : int = 1000_0000 ):
"""simple docstring"""
for i in range(1 ,__UpperCamelCase ):
if CHAINS[i] is None:
chain(i + 1 )
return CHAINS[:number].count(__UpperCamelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F"{solution() = }") | 86 | 1 |
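# A self-contained trace of the two attractors the comments above describe:
# iterating the digit-square map sends every positive integer either to 1 or
# into the cycle containing 89.
def digit_square_sum(n: int) -> int:
    return sum(int(c) ** 2 for c in str(n))

def chain_end(n: int) -> int:
    while n not in (1, 89):
        n = digit_square_sum(n)
    return n

assert chain_end(44) == 1   # 44 -> 32 -> 13 -> 10 -> 1
assert chain_end(85) == 89  # 85 -> 89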
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class _a ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : Optional[int] , UpperCAmelCase : Dict , UpperCAmelCase : Optional[int]=7 , UpperCAmelCase : List[str]=3 , UpperCAmelCase : List[Any]=18 , UpperCAmelCase : Optional[int]=30 , UpperCAmelCase : Any=400 , UpperCAmelCase : List[str]=True , UpperCAmelCase : List[Any]=None , UpperCAmelCase : List[str]=True , ):
A_ = size if size is not None else {"height": 18, "width": 18}
A_ = parent
A_ = batch_size
A_ = num_channels
A_ = image_size
A_ = min_resolution
A_ = max_resolution
A_ = do_resize
A_ = size
A_ = apply_ocr
def __A ( self : str ):
return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class _a ( snake_case_ , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase : Any = LayoutLMvaImageProcessor if is_pytesseract_available() else None
def __A ( self : Any ):
A_ = LayoutLMvaImageProcessingTester(self )
@property
def __A ( self : Dict ):
return self.image_processor_tester.prepare_image_processor_dict()
def __A ( self : Tuple ):
A_ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCAmelCase , "do_resize" ) )
self.assertTrue(hasattr(UpperCAmelCase , "size" ) )
self.assertTrue(hasattr(UpperCAmelCase , "apply_ocr" ) )
def __A ( self : Any ):
A_ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"height": 18, "width": 18} )
A_ = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {"height": 42, "width": 42} )
def __A ( self : Optional[int] ):
pass
def __A ( self : List[str] ):
# Initialize image_processing
A_ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
A_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase , Image.Image )
# Test not batched input
A_ = image_processing(image_inputs[0] , return_tensors="pt" )
self.assertEqual(
encoding.pixel_values.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
self.assertIsInstance(encoding.words , UpperCAmelCase )
self.assertIsInstance(encoding.boxes , UpperCAmelCase )
# Test batched
A_ = image_processing(UpperCAmelCase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
def __A ( self : Union[str, Any] ):
# Initialize image_processing
A_ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
A_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase , numpify=UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase , np.ndarray )
# Test not batched input
A_ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
# Test batched
A_ = image_processing(UpperCAmelCase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
def __A ( self : Dict ):
# Initialize image_processing
A_ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
A_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase , torchify=UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase , torch.Tensor )
# Test not batched input
A_ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
# Test batched
A_ = image_processing(UpperCAmelCase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
def __A ( self : Tuple ):
# with apply_OCR = True
A_ = LayoutLMvaImageProcessor()
from datasets import load_dataset
A_ = load_dataset("hf-internal-testing/fixtures_docvqa" , split="test" )
A_ = Image.open(ds[0]["file"] ).convert("RGB" )
A_ = image_processing(UpperCAmelCase , return_tensors="pt" )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
A_ = [["11:14", "to", "11:39", "a.m", "11:39", "to", "11:44", "a.m.", "11:44", "a.m.", "to", "12:25", "p.m.", "12:25", "to", "12:58", "p.m.", "12:58", "to", "4:00", "p.m.", "2:00", "to", "5:00", "p.m.", "Coffee", "Break", "Coffee", "will", "be", "served", "for", "men", "and", "women", "in", "the", "lobby", "adjacent", "to", "exhibit", "area.", "Please", "move", "into", "exhibit", "area.", "(Exhibits", "Open)", "TRRF", "GENERAL", "SESSION", "(PART", "|)", "Presiding:", "Lee", "A.", "Waller", "TRRF", "Vice", "President", "“Introductory", "Remarks”", "Lee", "A.", "Waller,", "TRRF", "Vice", "Presi-", "dent", "Individual", "Interviews", "with", "TRRF", "Public", "Board", "Members", "and", "Sci-", "entific", "Advisory", "Council", "Mem-", "bers", "Conducted", "by", "TRRF", "Treasurer", "Philip", "G.", "Kuehn", "to", "get", "answers", "which", "the", "public", "refrigerated", "warehousing", "industry", "is", "looking", "for.", "Plus", "questions", "from", "the", "floor.", "Dr.", "Emil", "M.", "Mrak,", "University", "of", "Cal-", "ifornia,", "Chairman,", "TRRF", "Board;", "Sam", "R.", "Cecil,", "University", "of", "Georgia", "College", "of", "Agriculture;", "Dr.", "Stanley", "Charm,", "Tufts", "University", "School", "of", "Medicine;", "Dr.", "Robert", "H.", "Cotton,", "ITT", "Continental", "Baking", "Company;", "Dr.", "Owen", "Fennema,", "University", "of", "Wis-", "consin;", "Dr.", "Robert", "E.", "Hardenburg,", "USDA.", "Questions", "and", "Answers", "Exhibits", "Open", "Capt.", "Jack", "Stoney", "Room", "TRRF", "Scientific", "Advisory", "Council", "Meeting", "Ballroom", "Foyer"]] # noqa: E231
A_ = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], [576, 788, 643, 
801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , UpperCAmelCase )
self.assertListEqual(encoding.boxes , UpperCAmelCase )
# with apply_OCR = False
A_ = LayoutLMvaImageProcessor(apply_ocr=UpperCAmelCase )
A_ = image_processing(UpperCAmelCase , return_tensors="pt" )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) ) | 86 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
__a :List[Any] = {
'configuration_tapas': ['TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TapasConfig'],
'tokenization_tapas': ['TapasTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a :Any = [
'TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST',
'TapasForMaskedLM',
'TapasForQuestionAnswering',
'TapasForSequenceClassification',
'TapasModel',
'TapasPreTrainedModel',
'load_tf_weights_in_tapas',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a :Dict = [
'TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFTapasForMaskedLM',
'TFTapasForQuestionAnswering',
'TFTapasForSequenceClassification',
'TFTapasModel',
'TFTapasPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig
from .tokenization_tapas import TapasTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tapas import (
TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasPreTrainedModel,
load_tf_weights_in_tapas,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_tapas import (
TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTapasForMaskedLM,
TFTapasForQuestionAnswering,
TFTapasForSequenceClassification,
TFTapasModel,
TFTapasPreTrainedModel,
)
else:
import sys
__a :str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 86 | 1 |
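# The _LazyModule above defers the heavy torch/TF imports until an attribute is
# first touched. A minimal sketch of the same idea using module-level
# __getattr__ (PEP 562); the mapping below is a toy stand-in for _import_structure.
import importlib

_IMPORT_MAP = {"TapasConfig": ".configuration_tapas"}  # attribute -> submodule

def __getattr__(name):
    if name in _IMPORT_MAP:
        submodule = importlib.import_module(_IMPORT_MAP[name], __name__)
        return getattr(submodule, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")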
import numpy as np
from numpy import ndarray
from scipy.optimize import Bounds, LinearConstraint, minimize
def __snake_case ( __UpperCamelCase : ndarray ):
"""simple docstring"""
return np.dot(__UpperCamelCase ,__UpperCamelCase )
class _a :
"""simple docstring"""
def __init__( self : Dict , *,
UpperCAmelCase : float = np.inf , UpperCAmelCase : str = "linear" , UpperCAmelCase : float = 0.0 , ):
A_ = regularization
A_ = gamma
if kernel == "linear":
A_ = self.__linear
elif kernel == "rbf":
if self.gamma == 0:
raise ValueError("rbf kernel requires gamma" )
if not isinstance(self.gamma , (float, int) ):
raise ValueError("gamma must be float or int" )
if not self.gamma > 0:
raise ValueError("gamma must be > 0" )
A_ = self.__rbf
# in the future, there could be a default value like in sklearn
            # sklearn: def_gamma = 1/(n_features * X.var()) (wiki)
# previously it was 1/(n_features)
else:
A_ = f'''Unknown kernel: {kernel}'''
raise ValueError(UpperCAmelCase )
def __A ( self : str , UpperCAmelCase : ndarray , UpperCAmelCase : ndarray ):
return np.dot(UpperCAmelCase , UpperCAmelCase )
def __A ( self : Union[str, Any] , UpperCAmelCase : ndarray , UpperCAmelCase : ndarray ):
return np.exp(-(self.gamma * norm_squared(vectora - vectora )) )
def __A ( self : int , UpperCAmelCase : list[ndarray] , UpperCAmelCase : ndarray ):
A_ = observations
A_ = classes
# using Wolfe's Dual to calculate w.
# Primal problem: minimize 1/2*norm_squared(w)
# constraint: yn(w . xn + b) >= 1
#
# With l a vector
# Dual problem: maximize sum_n(ln) -
# 1/2 * sum_n(sum_m(ln*lm*yn*ym*xn . xm))
# constraint: self.C >= ln >= 0
# and sum_n(ln*yn) = 0
# Then we get w using w = sum_n(ln*yn*xn)
# At the end we can get b ~= mean(yn - w . xn)
#
# Since we use kernels, we only need l_star to calculate b
# and to classify observations
((A_) , ) = np.shape(UpperCAmelCase )
def to_minimize(UpperCAmelCase : ndarray ) -> float:
A_ = 0
((A_) , ) = np.shape(UpperCAmelCase )
for i in range(UpperCAmelCase ):
for j in range(UpperCAmelCase ):
s += (
candidate[i]
* candidate[j]
* classes[i]
* classes[j]
* self.kernel(observations[i] , observations[j] )
)
return 1 / 2 * s - sum(UpperCAmelCase )
A_ = LinearConstraint(UpperCAmelCase , 0 , 0 )
A_ = Bounds(0 , self.regularization )
A_ = minimize(
UpperCAmelCase , np.ones(UpperCAmelCase ) , bounds=UpperCAmelCase , constraints=[ly_contraint] ).x
A_ = l_star
# calculating mean offset of separation plane to points
A_ = 0
for i in range(UpperCAmelCase ):
for j in range(UpperCAmelCase ):
s += classes[i] - classes[i] * self.optimum[i] * self.kernel(
observations[i] , observations[j] )
A_ = s / n
def __A ( self : Dict , UpperCAmelCase : ndarray ):
A_ = sum(
self.optimum[n]
* self.classes[n]
* self.kernel(self.observations[n] , UpperCAmelCase )
for n in range(len(self.classes ) ) )
return 1 if s + self.offset >= 0 else -1
if __name__ == "__main__":
import doctest
doctest.testmod() | 86 |
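# A self-contained sketch of the Wolfe dual described in the comments above,
# solved the same way (scipy's minimize with a LinearConstraint) for two
# linearly separable points with a hard margin and a linear kernel.
import numpy as np
from scipy.optimize import Bounds, LinearConstraint, minimize

x = np.array([[1.0, 1.0], [-1.0, -1.0]])
y = np.array([1.0, -1.0])
gram = x @ x.T  # linear-kernel Gram matrix

def negative_dual(l):
    # minimize 1/2 * sum_ij l_i l_j y_i y_j K_ij - sum_n l_n
    return 0.5 * (l * y) @ gram @ (l * y) - l.sum()

l_star = minimize(
    negative_dual, np.ones(2), bounds=Bounds(0, np.inf),
    constraints=[LinearConstraint(y, 0, 0)],  # sum_n l_n y_n = 0
).x
w = (l_star * y) @ x       # w = sum_n l_n y_n x_n
b = float(np.mean(y - x @ w))  # b ~= mean(y_n - w . x_n)
print(np.sign(x @ w + b))  # expected: [ 1. -1.]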
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
__a :List[Any] = get_logger()
__a :Optional[dict] = None
class _a ( TensorFormatter[Mapping, 'jax.Array', Mapping] ):
"""simple docstring"""
def __init__( self : str , UpperCAmelCase : int=None , UpperCAmelCase : List[str]=None , **UpperCAmelCase : List[Any] ):
super().__init__(features=UpperCAmelCase )
import jax
from jaxlib.xla_client import Device
if isinstance(UpperCAmelCase , UpperCAmelCase ):
raise ValueError(
f'''Expected {device} to be a `str` not {type(UpperCAmelCase )}, as `jaxlib.xla_extension.Device` '''
"is not serializable neither with `pickle` nor with `dill`. Instead you can surround "
"the device with `str()` to get its string identifier that will be internally mapped "
"to the actual `jaxlib.xla_extension.Device`." )
A_ = device if isinstance(UpperCAmelCase , UpperCAmelCase ) else str(jax.devices()[0] )
# using global variable since `jaxlib.xla_extension.Device` is not serializable neither
# with `pickle` nor with `dill`, so we need to use a global variable instead
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
A_ = self._map_devices_to_str()
if self.device not in list(DEVICE_MAPPING.keys() ):
logger.warning(
f'''Device with string identifier {self.device} not listed among the available '''
f'''devices: {list(DEVICE_MAPPING.keys() )}, so falling back to the default '''
f'''device: {str(jax.devices()[0] )}.''' )
A_ = str(jax.devices()[0] )
A_ = jnp_array_kwargs
@staticmethod
def __A ( ):
import jax
return {str(UpperCAmelCase ): device for device in jax.devices()}
def __A ( self : Optional[int] , UpperCAmelCase : int ):
import jax
import jax.numpy as jnp
if isinstance(UpperCAmelCase , UpperCAmelCase ) and column:
if all(
isinstance(UpperCAmelCase , jax.Array ) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column ):
return jnp.stack(UpperCAmelCase , axis=0 )
return column
def __A ( self : List[str] , UpperCAmelCase : str ):
import jax
import jax.numpy as jnp
if isinstance(UpperCAmelCase , (str, bytes, type(UpperCAmelCase )) ):
return value
elif isinstance(UpperCAmelCase , (np.character, np.ndarray) ) and np.issubdtype(value.dtype , np.character ):
return value.tolist()
A_ = {}
if isinstance(UpperCAmelCase , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.integer ):
# the default int precision depends on the jax config
# see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
if jax.config.jax_enable_xaa:
A_ = {"dtype": jnp.intaa}
else:
A_ = {"dtype": jnp.intaa}
elif isinstance(UpperCAmelCase , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.floating ):
A_ = {"dtype": jnp.floataa}
elif config.PIL_AVAILABLE and "PIL" in sys.modules:
import PIL.Image
if isinstance(UpperCAmelCase , PIL.Image.Image ):
A_ = np.asarray(UpperCAmelCase )
# using global variable since `jaxlib.xla_extension.Device` is not serializable neither
# with `pickle` nor with `dill`, so we need to use a global variable instead
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
A_ = self._map_devices_to_str()
with jax.default_device(DEVICE_MAPPING[self.device] ):
# calling jnp.array on a np.ndarray does copy the data
# see https://github.com/google/jax/issues/4486
return jnp.array(UpperCAmelCase , **{**default_dtype, **self.jnp_array_kwargs} )
def __A ( self : Any , UpperCAmelCase : Dict ):
import jax
# support for torch, tf, jax etc.
if config.TORCH_AVAILABLE and "torch" in sys.modules:
import torch
if isinstance(UpperCAmelCase , torch.Tensor ):
return self._tensorize(data_struct.detach().cpu().numpy()[()] )
if hasattr(UpperCAmelCase , "__array__" ) and not isinstance(UpperCAmelCase , jax.Array ):
A_ = data_struct.__array__()
# support for nested types like struct of list of struct
if isinstance(UpperCAmelCase , np.ndarray ):
if data_struct.dtype == object: # jax arrays cannot be instantied from an array of objects
return self._consolidate([self.recursive_tensorize(UpperCAmelCase ) for substruct in data_struct] )
elif isinstance(UpperCAmelCase , (list, tuple) ):
return self._consolidate([self.recursive_tensorize(UpperCAmelCase ) for substruct in data_struct] )
return self._tensorize(UpperCAmelCase )
def __A ( self : Tuple , UpperCAmelCase : dict ):
return map_nested(self._recursive_tensorize , UpperCAmelCase , map_list=UpperCAmelCase )
def __A ( self : Dict , UpperCAmelCase : pa.Table ):
A_ = self.numpy_arrow_extractor().extract_row(UpperCAmelCase )
A_ = self.python_features_decoder.decode_row(UpperCAmelCase )
return self.recursive_tensorize(UpperCAmelCase )
def __A ( self : Any , UpperCAmelCase : pa.Table ):
A_ = self.numpy_arrow_extractor().extract_column(UpperCAmelCase )
A_ = self.python_features_decoder.decode_column(UpperCAmelCase , pa_table.column_names[0] )
A_ = self.recursive_tensorize(UpperCAmelCase )
A_ = self._consolidate(UpperCAmelCase )
return column
def __A ( self : Dict , UpperCAmelCase : pa.Table ):
A_ = self.numpy_arrow_extractor().extract_batch(UpperCAmelCase )
A_ = self.python_features_decoder.decode_batch(UpperCAmelCase )
A_ = self.recursive_tensorize(UpperCAmelCase )
for column_name in batch:
A_ = self._consolidate(batch[column_name] )
return batch | 86 | 1 |
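# A small check of the dtype rule the formatter above applies at runtime:
# jax defaults to 32-bit integers and floats unless x64 mode is enabled, which
# is why the target dtype has to be chosen by inspecting jax.config.
import jax.numpy as jnp

print(jnp.array([1, 2, 3]).dtype)   # int32 unless jax_enable_x64 is set
print(jnp.array([1.0, 2.0]).dtype)  # float32 by default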
from __future__ import annotations
__a :str = '#'
class _a :
"""simple docstring"""
def __init__( self : int ):
A_ = {}
def __A ( self : int , UpperCAmelCase : str ):
A_ = self._trie
for char in text:
if char not in trie:
A_ = {}
A_ = trie[char]
A_ = True
def __A ( self : Tuple , UpperCAmelCase : str ):
A_ = self._trie
for char in prefix:
if char in trie:
A_ = trie[char]
else:
return []
return self._elements(UpperCAmelCase )
def __A ( self : Union[str, Any] , UpperCAmelCase : dict ):
A_ = []
for c, v in d.items():
A_ = [" "] if c == END else [(c + s) for s in self._elements(UpperCAmelCase )]
result.extend(UpperCAmelCase )
return tuple(UpperCAmelCase )
__a :Dict = Trie()
__a :Union[str, Any] = ('depart', 'detergent', 'daring', 'dog', 'deer', 'deal')
for word in words:
trie.insert_word(word)
def __snake_case ( __UpperCamelCase : str ):
"""simple docstring"""
A_ = trie.find_word(__UpperCamelCase )
return tuple(string + word for word in suffixes )
def __snake_case ( ):
"""simple docstring"""
print(autocomplete_using_trie("de" ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main() | 86 |
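# A compact self-contained version of the same trie-based autocomplete, using
# nested dicts exactly like the snippet above ("#" marks a complete word).
END = "#"

def insert(trie: dict, word: str) -> None:
    node = trie
    for ch in word:
        node = node.setdefault(ch, {})
    node[END] = True

def complete(trie: dict, prefix: str) -> list:
    node = trie
    for ch in prefix:
        if ch not in node:
            return []
        node = node[ch]
    # depth-first collection of every suffix below the prefix node
    def walk(n: dict, acc: str) -> list:
        words = []
        for ch, child in n.items():
            if ch == END:
                words.append(prefix + acc)
            else:
                words.extend(walk(child, acc + ch))
        return words
    return walk(node, "")

trie: dict = {}
for w in ("depart", "detergent", "daring", "dog", "deer", "deal"):
    insert(trie, w)
print(complete(trie, "de"))  # ['depart', 'detergent', 'deer', 'deal']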
import logging
import os
from typing import List, Tuple
import numpy as np
import psutil
import torch
import torch.distributed as dist
from transformers import RagRetriever
__a :Any = logging.getLogger(__name__)
class _a ( snake_case_ ):
"""simple docstring"""
def __init__( self : Dict , UpperCAmelCase : List[Any] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : str=None ):
super().__init__(
UpperCAmelCase , question_encoder_tokenizer=UpperCAmelCase , generator_tokenizer=UpperCAmelCase , index=UpperCAmelCase , init_retrieval=UpperCAmelCase , )
A_ = None
def __A ( self : Dict , UpperCAmelCase : int ):
logger.info("initializing retrieval" )
# initializing a separate process group for retrieval as the default
# nccl backend doesn't support gather/scatter operations while gloo
# is too slow to replace nccl for the core gpu communication
if dist.is_initialized():
logger.info("dist initialized" )
# needs to be set manually
A_ = self._infer_socket_ifname()
# avoid clash with the NCCL port
A_ = str(distributed_port + 1 )
A_ = dist.new_group(ranks=UpperCAmelCase , backend="gloo" )
# initialize retriever only on the main worker
if not dist.is_initialized() or self._is_main():
logger.info("dist not initialized / main" )
self.index.init_index()
        # all processes wait until the retriever is initialized by the main process
if dist.is_initialized():
torch.distributed.barrier(group=self.process_group )
def __A ( self : List[str] ):
return dist.get_rank(group=self.process_group ) == 0
def __A ( self : Any , UpperCAmelCase : List[Any] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Dict=torch.floataa ):
A_ = torch.empty(UpperCAmelCase , dtype=UpperCAmelCase )
dist.scatter(UpperCAmelCase , src=0 , scatter_list=UpperCAmelCase , group=self.process_group )
return target_tensor
def __A ( self : Any ):
A_ = psutil.net_if_addrs()
# a hacky way to deal with varying network interface names
A_ = next((addr for addr in addrs if addr.startswith("e" )) , UpperCAmelCase )
return ifname
def __A ( self : Tuple , UpperCAmelCase : np.ndarray , UpperCAmelCase : int ):
# single GPU training
if not dist.is_initialized():
A_ , A_ = self._main_retrieve(UpperCAmelCase , UpperCAmelCase )
return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(UpperCAmelCase )
# distributed training
A_ = dist.get_world_size(group=self.process_group )
# gather logic
A_ = None
if self._is_main():
A_ = [torch.empty(question_hidden_states.shape , dtype=torch.floataa ) for _ in range(UpperCAmelCase )]
dist.gather(torch.tensor(UpperCAmelCase ) , dst=0 , gather_list=UpperCAmelCase , group=self.process_group )
# scatter logic
A_ = question_hidden_states.shape[0]
A_ = []
A_ = []
if self._is_main():
assert len(UpperCAmelCase ) == world_size
A_ , A_ = self._main_retrieve(torch.cat(UpperCAmelCase ).numpy() , UpperCAmelCase )
A_ , A_ = torch.tensor(UpperCAmelCase ), torch.tensor(UpperCAmelCase )
A_ = self._chunk_tensor(UpperCAmelCase , UpperCAmelCase )
A_ = self._chunk_tensor(UpperCAmelCase , UpperCAmelCase )
A_ = self._scattered(UpperCAmelCase , [n_queries, n_docs] , target_type=torch.intaa )
A_ = self._scattered(UpperCAmelCase , [n_queries, n_docs, question_hidden_states.shape[1]] )
return retrieved_doc_embeds.numpy(), doc_ids.numpy(), self.index.get_doc_dicts(UpperCAmelCase ) | 86 | 1 |
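# The scatter step above hands each worker its own slice of the main worker's
# retrieval results. A minimal sketch of that chunking (an assumed helper; the
# real _chunk_tensor lives in the RagRetriever base class):
import torch

def chunk_tensor(t: torch.Tensor, chunk_sizes: list) -> list:
    return list(torch.split(t, chunk_sizes, dim=0))

batch = torch.arange(10).reshape(5, 2)
print([tuple(c.shape) for c in chunk_tensor(batch, [2, 3])])  # [(2, 2), (3, 2)]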
import contextlib
import csv
import json
import os
import sqlitea
import tarfile
import textwrap
import zipfile
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
import datasets
import datasets.config
@pytest.fixture(scope="session" )
def __snake_case ( ):
"""simple docstring"""
A_ = 10
A_ = datasets.Features(
{
"tokens": datasets.Sequence(datasets.Value("string" ) ),
"labels": datasets.Sequence(datasets.ClassLabel(names=["negative", "positive"] ) ),
"answers": datasets.Sequence(
{
"text": datasets.Value("string" ),
"answer_start": datasets.Value("int32" ),
} ),
"id": datasets.Value("int64" ),
} )
A_ = datasets.Dataset.from_dict(
{
"tokens": [["foo"] * 5] * n,
"labels": [[1] * 5] * n,
"answers": [{"answer_start": [97], "text": ["1976"]}] * 10,
"id": list(range(__UpperCamelCase ) ),
} ,features=__UpperCamelCase ,)
return dataset
@pytest.fixture(scope="session" )
def __snake_case ( __UpperCamelCase : Union[str, Any] ,__UpperCamelCase : Union[str, Any] ):
"""simple docstring"""
A_ = str(tmp_path_factory.mktemp("data" ) / "file.arrow" )
dataset.map(cache_file_name=__UpperCamelCase )
return filename
# FILE_CONTENT + files
__a :Optional[int] = '\\n Text data.\n Second line of data.'
@pytest.fixture(scope="session" )
def __snake_case ( __UpperCamelCase : Dict ):
"""simple docstring"""
A_ = tmp_path_factory.mktemp("data" ) / "file.txt"
A_ = FILE_CONTENT
with open(__UpperCamelCase ,"w" ) as f:
f.write(__UpperCamelCase )
return filename
@pytest.fixture(scope="session" )
def __snake_case ( __UpperCamelCase : Any ):
"""simple docstring"""
import bza
A_ = tmp_path_factory.mktemp("data" ) / "file.txt.bz2"
A_ = bytes(__UpperCamelCase ,"utf-8" )
with bza.open(__UpperCamelCase ,"wb" ) as f:
f.write(__UpperCamelCase )
return path
@pytest.fixture(scope="session" )
def __snake_case ( __UpperCamelCase : List[Any] ):
"""simple docstring"""
import gzip
A_ = str(tmp_path_factory.mktemp("data" ) / "file.txt.gz" )
A_ = bytes(__UpperCamelCase ,"utf-8" )
with gzip.open(__UpperCamelCase ,"wb" ) as f:
f.write(__UpperCamelCase )
return path
@pytest.fixture(scope="session" )
def __snake_case ( __UpperCamelCase : Any ):
"""simple docstring"""
if datasets.config.LZ4_AVAILABLE:
import lza.frame
A_ = tmp_path_factory.mktemp("data" ) / "file.txt.lz4"
A_ = bytes(__UpperCamelCase ,"utf-8" )
with lza.frame.open(__UpperCamelCase ,"wb" ) as f:
f.write(__UpperCamelCase )
return path
@pytest.fixture(scope="session" )
def __snake_case ( __UpperCamelCase : Tuple ,__UpperCamelCase : int ):
"""simple docstring"""
if datasets.config.PY7ZR_AVAILABLE:
import pyazr
A_ = tmp_path_factory.mktemp("data" ) / "file.txt.7z"
with pyazr.SevenZipFile(__UpperCamelCase ,"w" ) as archive:
archive.write(__UpperCamelCase ,arcname=os.path.basename(__UpperCamelCase ) )
return path
@pytest.fixture(scope="session" )
def __snake_case ( __UpperCamelCase : int ,__UpperCamelCase : str ):
"""simple docstring"""
import tarfile
A_ = tmp_path_factory.mktemp("data" ) / "file.txt.tar"
with tarfile.TarFile(__UpperCamelCase ,"w" ) as f:
f.add(__UpperCamelCase ,arcname=os.path.basename(__UpperCamelCase ) )
return path
@pytest.fixture(scope="session" )
def __snake_case ( __UpperCamelCase : Tuple ):
"""simple docstring"""
import lzma
A_ = tmp_path_factory.mktemp("data" ) / "file.txt.xz"
A_ = bytes(__UpperCamelCase ,"utf-8" )
with lzma.open(__UpperCamelCase ,"wb" ) as f:
f.write(__UpperCamelCase )
return path
@pytest.fixture(scope="session" )
def __snake_case ( __UpperCamelCase : str ,__UpperCamelCase : Union[str, Any] ):
"""simple docstring"""
import zipfile
A_ = tmp_path_factory.mktemp("data" ) / "file.txt.zip"
with zipfile.ZipFile(__UpperCamelCase ,"w" ) as f:
f.write(__UpperCamelCase ,arcname=os.path.basename(__UpperCamelCase ) )
return path
@pytest.fixture(scope="session" )
def __snake_case ( __UpperCamelCase : Any ):
"""simple docstring"""
if datasets.config.ZSTANDARD_AVAILABLE:
import zstandard as zstd
A_ = tmp_path_factory.mktemp("data" ) / "file.txt.zst"
A_ = bytes(__UpperCamelCase ,"utf-8" )
with zstd.open(__UpperCamelCase ,"wb" ) as f:
f.write(__UpperCamelCase )
return path
@pytest.fixture(scope="session" )
def __snake_case ( __UpperCamelCase : List[Any] ):
"""simple docstring"""
A_ = tmp_path_factory.mktemp("data" ) / "file.xml"
A_ = textwrap.dedent(
"\\n <?xml version=\"1.0\" encoding=\"UTF-8\" ?>\n <tmx version=\"1.4\">\n <header segtype=\"sentence\" srclang=\"ca\" />\n <body>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 1</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 1</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 2</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 2</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 3</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 3</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 4</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 4</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 5</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 5</seg></tuv>\n </tu>\n </body>\n </tmx>" )
with open(__UpperCamelCase ,"w" ) as f:
f.write(__UpperCamelCase )
return filename
__a :int = [
{'col_1': '0', 'col_2': 0, 'col_3': 0.0},
{'col_1': '1', 'col_2': 1, 'col_3': 1.0},
{'col_1': '2', 'col_2': 2, 'col_3': 2.0},
{'col_1': '3', 'col_2': 3, 'col_3': 3.0},
]
__a :int = [
{'col_1': '4', 'col_2': 4, 'col_3': 4.0},
{'col_1': '5', 'col_2': 5, 'col_3': 5.0},
]
__a :Optional[int] = {
'col_1': ['0', '1', '2', '3'],
'col_2': [0, 1, 2, 3],
'col_3': [0.0, 1.0, 2.0, 3.0],
}
__a :int = [
{'col_3': 0.0, 'col_1': '0', 'col_2': 0},
{'col_3': 1.0, 'col_1': '1', 'col_2': 1},
]
__a :Optional[Any] = [
{'col_1': 's0', 'col_2': 0, 'col_3': 0.0},
{'col_1': 's1', 'col_2': 1, 'col_3': 1.0},
{'col_1': 's2', 'col_2': 2, 'col_3': 2.0},
{'col_1': 's3', 'col_2': 3, 'col_3': 3.0},
]
@pytest.fixture(scope="session" )
def __snake_case ( ):
"""simple docstring"""
return DATA_DICT_OF_LISTS
@pytest.fixture(scope="session" )
def __snake_case ( __UpperCamelCase : Tuple ):
"""simple docstring"""
A_ = datasets.Dataset.from_dict(__UpperCamelCase )
A_ = str(tmp_path_factory.mktemp("data" ) / "dataset.arrow" )
dataset.map(cache_file_name=__UpperCamelCase )
return path
@pytest.fixture(scope="session" )
def __snake_case ( __UpperCamelCase : Tuple ):
"""simple docstring"""
A_ = str(tmp_path_factory.mktemp("data" ) / "dataset.sqlite" )
with contextlib.closing(sqlitea.connect(__UpperCamelCase ) ) as con:
A_ = con.cursor()
cur.execute("CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)" )
for item in DATA:
cur.execute("INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)" ,tuple(item.values() ) )
con.commit()
return path
@pytest.fixture(scope="session" )
def __snake_case ( __UpperCamelCase : Any ):
"""simple docstring"""
A_ = str(tmp_path_factory.mktemp("data" ) / "dataset.csv" )
with open(__UpperCamelCase ,"w" ,newline="" ) as f:
A_ = csv.DictWriter(__UpperCamelCase ,fieldnames=["col_1", "col_2", "col_3"] )
writer.writeheader()
for item in DATA:
writer.writerow(__UpperCamelCase )
return path
@pytest.fixture(scope="session" )
def __snake_case ( __UpperCamelCase : Dict ):
"""simple docstring"""
A_ = str(tmp_path_factory.mktemp("data" ) / "dataset2.csv" )
with open(__UpperCamelCase ,"w" ,newline="" ) as f:
A_ = csv.DictWriter(__UpperCamelCase ,fieldnames=["col_1", "col_2", "col_3"] )
writer.writeheader()
for item in DATA:
writer.writerow(__UpperCamelCase )
return path
@pytest.fixture(scope="session" )
def __snake_case ( __UpperCamelCase : Union[str, Any] ,__UpperCamelCase : List[str] ):
"""simple docstring"""
import bza
A_ = tmp_path_factory.mktemp("data" ) / "dataset.csv.bz2"
with open(__UpperCamelCase ,"rb" ) as f:
A_ = f.read()
# data = bytes(FILE_CONTENT, "utf-8")
with bza.open(__UpperCamelCase ,"wb" ) as f:
f.write(__UpperCamelCase )
return path
@pytest.fixture(scope="session" )
def __snake_case ( __UpperCamelCase : str ,__UpperCamelCase : Optional[int] ,__UpperCamelCase : Optional[Any] ):
"""simple docstring"""
A_ = tmp_path_factory.mktemp("data" ) / "dataset.csv.zip"
with zipfile.ZipFile(__UpperCamelCase ,"w" ) as f:
f.write(__UpperCamelCase ,arcname=os.path.basename(__UpperCamelCase ) )
f.write(__UpperCamelCase ,arcname=os.path.basename(__UpperCamelCase ) )
return path
@pytest.fixture(scope="session" )
def __snake_case ( __UpperCamelCase : int ,__UpperCamelCase : Any ,__UpperCamelCase : Union[str, Any] ):
"""simple docstring"""
A_ = tmp_path_factory.mktemp("data" ) / "dataset.csv.zip"
with zipfile.ZipFile(__UpperCamelCase ,"w" ) as f:
f.write(__UpperCamelCase ,arcname=os.path.basename(csv_path.replace(".csv" ,".CSV" ) ) )
f.write(__UpperCamelCase ,arcname=os.path.basename(csva_path.replace(".csv" ,".CSV" ) ) )
return path
@pytest.fixture(scope="session" )
def __snake_case ( __UpperCamelCase : List[str] ,__UpperCamelCase : str ,__UpperCamelCase : str ):
"""simple docstring"""
A_ = tmp_path_factory.mktemp("data" ) / "dataset_with_dir.csv.zip"
with zipfile.ZipFile(__UpperCamelCase ,"w" ) as f:
f.write(__UpperCamelCase ,arcname=os.path.join("main_dir" ,os.path.basename(__UpperCamelCase ) ) )
f.write(__UpperCamelCase ,arcname=os.path.join("main_dir" ,os.path.basename(__UpperCamelCase ) ) )
return path
@pytest.fixture(scope="session" )
def __snake_case ( __UpperCamelCase : Union[str, Any] ):
"""simple docstring"""
A_ = str(tmp_path_factory.mktemp("data" ) / "dataset.parquet" )
A_ = pa.schema(
{
"col_1": pa.string(),
"col_2": pa.intaa(),
"col_3": pa.floataa(),
} )
with open(__UpperCamelCase ,"wb" ) as f:
A_ = pq.ParquetWriter(__UpperCamelCase ,schema=__UpperCamelCase )
A_ = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(__UpperCamelCase ) )] for k in DATA[0]} ,schema=__UpperCamelCase )
writer.write_table(__UpperCamelCase )
writer.close()
return path
@pytest.fixture(scope="session" )
def __snake_case ( __UpperCamelCase : Any ):
"""simple docstring"""
A_ = str(tmp_path_factory.mktemp("data" ) / "dataset.json" )
A_ = {"data": DATA}
with open(__UpperCamelCase ,"w" ) as f:
json.dump(__UpperCamelCase ,__UpperCamelCase )
return path
@pytest.fixture(scope="session" )
def __snake_case ( __UpperCamelCase : Dict ):
"""simple docstring"""
A_ = str(tmp_path_factory.mktemp("data" ) / "dataset.json" )
A_ = {"data": DATA_DICT_OF_LISTS}
with open(__UpperCamelCase ,"w" ) as f:
json.dump(__UpperCamelCase ,__UpperCamelCase )
return path
@pytest.fixture(scope="session" )
def __snake_case ( __UpperCamelCase : List[Any] ):
"""simple docstring"""
A_ = str(tmp_path_factory.mktemp("data" ) / "dataset.jsonl" )
with open(__UpperCamelCase ,"w" ) as f:
for item in DATA:
f.write(json.dumps(__UpperCamelCase ) + "\n" )
return path
@pytest.fixture(scope="session" )
def __snake_case ( __UpperCamelCase : Optional[int] ):
"""simple docstring"""
A_ = str(tmp_path_factory.mktemp("data" ) / "dataset2.jsonl" )
with open(__UpperCamelCase ,"w" ) as f:
for item in DATA:
f.write(json.dumps(__UpperCamelCase ) + "\n" )
return path
@pytest.fixture(scope="session" )
def __snake_case ( __UpperCamelCase : int ):
"""simple docstring"""
A_ = str(tmp_path_factory.mktemp("data" ) / "dataset_312.jsonl" )
with open(__UpperCamelCase ,"w" ) as f:
for item in DATA_312:
f.write(json.dumps(__UpperCamelCase ) + "\n" )
return path
@pytest.fixture(scope="session" )
def __snake_case ( __UpperCamelCase : Tuple ):
"""simple docstring"""
A_ = str(tmp_path_factory.mktemp("data" ) / "dataset-str.jsonl" )
with open(__UpperCamelCase ,"w" ) as f:
for item in DATA_STR:
f.write(json.dumps(__UpperCamelCase ) + "\n" )
return path
@pytest.fixture(scope="session" )
def __snake_case ( __UpperCamelCase : Any ,__UpperCamelCase : Optional[Any] ):
"""simple docstring"""
import gzip
A_ = str(tmp_path_factory.mktemp("data" ) / "dataset.txt.gz" )
with open(__UpperCamelCase ,"rb" ) as orig_file:
with gzip.open(__UpperCamelCase ,"wb" ) as zipped_file:
zipped_file.writelines(__UpperCamelCase )
return path
@pytest.fixture(scope="session" )
def __snake_case ( __UpperCamelCase : Union[str, Any] ,__UpperCamelCase : Any ):
"""simple docstring"""
import gzip
A_ = str(tmp_path_factory.mktemp("data" ) / "dataset.jsonl.gz" )
with open(__UpperCamelCase ,"rb" ) as orig_file:
with gzip.open(__UpperCamelCase ,"wb" ) as zipped_file:
zipped_file.writelines(__UpperCamelCase )
return path
@pytest.fixture(scope="session" )
def __snake_case ( __UpperCamelCase : Dict ,__UpperCamelCase : Union[str, Any] ,__UpperCamelCase : List[str] ):
"""simple docstring"""
A_ = tmp_path_factory.mktemp("data" ) / "dataset.jsonl.zip"
with zipfile.ZipFile(__UpperCamelCase ,"w" ) as f:
f.write(__UpperCamelCase ,arcname=os.path.basename(__UpperCamelCase ) )
f.write(__UpperCamelCase ,arcname=os.path.basename(__UpperCamelCase ) )
return path
@pytest.fixture(scope="session" )
def __snake_case ( __UpperCamelCase : List[Any] ,__UpperCamelCase : Any ,__UpperCamelCase : Tuple ,__UpperCamelCase : List[str] ):
"""simple docstring"""
A_ = tmp_path_factory.mktemp("data" ) / "dataset_nested.jsonl.zip"
with zipfile.ZipFile(__UpperCamelCase ,"w" ) as f:
f.write(__UpperCamelCase ,arcname=os.path.join("nested" ,os.path.basename(__UpperCamelCase ) ) )
return path
@pytest.fixture(scope="session" )
def __snake_case ( __UpperCamelCase : Any ,__UpperCamelCase : Dict ,__UpperCamelCase : Optional[int] ):
"""simple docstring"""
A_ = tmp_path_factory.mktemp("data" ) / "dataset_with_dir.jsonl.zip"
with zipfile.ZipFile(__UpperCamelCase ,"w" ) as f:
f.write(__UpperCamelCase ,arcname=os.path.join("main_dir" ,os.path.basename(__UpperCamelCase ) ) )
f.write(__UpperCamelCase ,arcname=os.path.join("main_dir" ,os.path.basename(__UpperCamelCase ) ) )
return path
@pytest.fixture(scope="session" )
def __snake_case ( __UpperCamelCase : int ,__UpperCamelCase : Optional[int] ,__UpperCamelCase : Dict ):
"""simple docstring"""
A_ = tmp_path_factory.mktemp("data" ) / "dataset.jsonl.tar"
with tarfile.TarFile(__UpperCamelCase ,"w" ) as f:
f.add(__UpperCamelCase ,arcname=os.path.basename(__UpperCamelCase ) )
f.add(__UpperCamelCase ,arcname=os.path.basename(__UpperCamelCase ) )
return path
@pytest.fixture(scope="session" )
def __snake_case ( __UpperCamelCase : Union[str, Any] ,__UpperCamelCase : List[str] ,__UpperCamelCase : Tuple ,__UpperCamelCase : str ):
"""simple docstring"""
A_ = tmp_path_factory.mktemp("data" ) / "dataset_nested.jsonl.tar"
with tarfile.TarFile(__UpperCamelCase ,"w" ) as f:
f.add(__UpperCamelCase ,arcname=os.path.join("nested" ,os.path.basename(__UpperCamelCase ) ) )
return path
@pytest.fixture(scope="session" )
def __snake_case ( __UpperCamelCase : Tuple ):
"""simple docstring"""
A_ = ["0", "1", "2", "3"]
A_ = str(tmp_path_factory.mktemp("data" ) / "dataset.txt" )
with open(__UpperCamelCase ,"w" ) as f:
for item in data:
f.write(item + "\n" )
return path
@pytest.fixture(scope="session" )
def __snake_case ( __UpperCamelCase : Tuple ):
"""simple docstring"""
A_ = ["0", "1", "2", "3"]
A_ = str(tmp_path_factory.mktemp("data" ) / "dataset2.txt" )
with open(__UpperCamelCase ,"w" ) as f:
for item in data:
f.write(item + "\n" )
return path
@pytest.fixture(scope="session" )
def __snake_case ( __UpperCamelCase : List[Any] ):
"""simple docstring"""
A_ = ["0", "1", "2", "3"]
A_ = tmp_path_factory.mktemp("data" ) / "dataset.abc"
with open(__UpperCamelCase ,"w" ) as f:
for item in data:
f.write(item + "\n" )
return path
@pytest.fixture(scope="session" )
def __snake_case ( __UpperCamelCase : Dict ,__UpperCamelCase : str ,__UpperCamelCase : Optional[Any] ):
"""simple docstring"""
A_ = tmp_path_factory.mktemp("data" ) / "dataset.text.zip"
with zipfile.ZipFile(__UpperCamelCase ,"w" ) as f:
f.write(__UpperCamelCase ,arcname=os.path.basename(__UpperCamelCase ) )
f.write(__UpperCamelCase ,arcname=os.path.basename(__UpperCamelCase ) )
return path
@pytest.fixture(scope="session" )
def __snake_case ( __UpperCamelCase : Tuple ,__UpperCamelCase : str ,__UpperCamelCase : Union[str, Any] ):
"""simple docstring"""
A_ = tmp_path_factory.mktemp("data" ) / "dataset_with_dir.text.zip"
with zipfile.ZipFile(__UpperCamelCase ,"w" ) as f:
f.write(__UpperCamelCase ,arcname=os.path.join("main_dir" ,os.path.basename(__UpperCamelCase ) ) )
f.write(__UpperCamelCase ,arcname=os.path.join("main_dir" ,os.path.basename(__UpperCamelCase ) ) )
return path
@pytest.fixture(scope="session" )
def __snake_case ( __UpperCamelCase : List[str] ,__UpperCamelCase : Dict ,__UpperCamelCase : str ):
"""simple docstring"""
A_ = tmp_path_factory.mktemp("data" ) / "dataset.ext.zip"
with zipfile.ZipFile(__UpperCamelCase ,"w" ) as f:
f.write(__UpperCamelCase ,arcname=os.path.basename("unsupported.ext" ) )
f.write(__UpperCamelCase ,arcname=os.path.basename("unsupported_2.ext" ) )
return path
@pytest.fixture(scope="session" )
def __snake_case ( __UpperCamelCase : List[Any] ):
"""simple docstring"""
A_ = "\n".join(["First", "Second\u2029with Unicode new line", "Third"] )
A_ = str(tmp_path_factory.mktemp("data" ) / "dataset_with_unicode_new_lines.txt" )
with open(__UpperCamelCase ,"w" ,encoding="utf-8" ) as f:
f.write(__UpperCamelCase )
return path
@pytest.fixture(scope="session" )
def __snake_case ( ):
"""simple docstring"""
return os.path.join("tests" ,"features" ,"data" ,"test_image_rgb.jpg" )
@pytest.fixture(scope="session" )
def __snake_case ( ):
"""simple docstring"""
return os.path.join("tests" ,"features" ,"data" ,"test_audio_44100.wav" )
@pytest.fixture(scope="session" )
def __snake_case ( __UpperCamelCase : List[Any] ,__UpperCamelCase : int ):
"""simple docstring"""
A_ = tmp_path_factory.mktemp("data" ) / "dataset.img.zip"
with zipfile.ZipFile(__UpperCamelCase ,"w" ) as f:
f.write(__UpperCamelCase ,arcname=os.path.basename(__UpperCamelCase ) )
f.write(__UpperCamelCase ,arcname=os.path.basename(__UpperCamelCase ).replace(".jpg" ,"2.jpg" ) )
return path
@pytest.fixture(scope="session" )
def __snake_case ( __UpperCamelCase : Any ):
"""simple docstring"""
A_ = tmp_path_factory.mktemp("data_dir" )
(data_dir / "subdir").mkdir()
with open(data_dir / "subdir" / "train.txt" ,"w" ) as f:
f.write("foo\n" * 10 )
with open(data_dir / "subdir" / "test.txt" ,"w" ) as f:
f.write("bar\n" * 10 )
# hidden file
with open(data_dir / "subdir" / ".test.txt" ,"w" ) as f:
f.write("bar\n" * 10 )
# hidden directory
(data_dir / ".subdir").mkdir()
with open(data_dir / ".subdir" / "train.txt" ,"w" ) as f:
f.write("foo\n" * 10 )
with open(data_dir / ".subdir" / "test.txt" ,"w" ) as f:
f.write("bar\n" * 10 )
return data_dir | 86 |
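A minimal, self-contained sketch of the pattern the fixtures above share (assuming only pytest and the standard library; the fixture name here is hypothetical): materialize a data file under a tmp_path_factory directory, archive it, and return the archive path for data-loading tests.

import os
import zipfile

import pytest


@pytest.fixture(scope="session")
def dataset_txt_zip_path(tmp_path_factory):
    # write a small text dataset, then zip it next to the original
    data_file = tmp_path_factory.mktemp("data") / "dataset.txt"
    data_file.write_text("0\n1\n2\n3\n")
    path = tmp_path_factory.mktemp("data") / "dataset.txt.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(data_file, arcname=os.path.basename(data_file))
    return path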
from jiwer import compute_measures
import datasets
__a :List[Any] = '\\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n'
__a :Union[str, Any] = '\\nWord error rate (WER) is a common metric of the performance of an automatic speech recognition system.\n\nThe general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.\n\nThis problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.\n\nWord error rate can then be computed as:\n\nWER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct words,\nN is the number of words in the reference (N=S+D+C).\n\nThis value indicates the average number of errors per reference word. The lower the value, the better the\nperformance of the ASR system with a WER of 0 being a perfect score.\n'
__a :str = '\nCompute WER score of transcribed segments against references.\n\nArgs:\n references: List of references for each speech input.\n predictions: List of transcriptions to score.\n concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.\n\nReturns:\n (float): the word error rate\n\nExamples:\n\n >>> predictions = ["this is the prediction", "there is an other sample"]\n >>> references = ["this is the reference", "there is another one"]\n >>> wer = datasets.load_metric("wer")\n >>> wer_score = wer.compute(predictions=predictions, references=references)\n >>> print(wer_score)\n 0.5\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _a ( datasets.Metric ):
"""simple docstring"""
def __A ( self : Any ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Value("string" , id="sequence" ),
} ) , codebase_urls=["https://github.com/jitsi/jiwer/"] , reference_urls=[
"https://en.wikipedia.org/wiki/Word_error_rate",
] , )
def __A ( self : Dict , UpperCAmelCase : Dict=None , UpperCAmelCase : List[Any]=None , UpperCAmelCase : str=False ):
if concatenate_texts:
return compute_measures(UpperCAmelCase , UpperCAmelCase )["wer"]
else:
A_ = 0
A_ = 0
for prediction, reference in zip(UpperCAmelCase , UpperCAmelCase ):
A_ = compute_measures(UpperCAmelCase , UpperCAmelCase )
incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
total += measures["substitutions"] + measures["deletions"] + measures["hits"]
return incorrect / total | 86 | 1 |
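For intuition about the iterative branch above, the measures for a single pair can be pooled by hand (a sketch assuming jiwer is installed and aligns "there is an other sample" against "there is another one" as two substitutions plus one insertion):

from jiwer import compute_measures

measures = compute_measures("there is another one", "there is an other sample")
errors = measures["substitutions"] + measures["deletions"] + measures["insertions"]
total = measures["substitutions"] + measures["deletions"] + measures["hits"]
print(errors / total)  # 3 / 4 = 0.75 for this pair alone
# pooled with the first docstring pair (1 error over 4 reference words),
# WER = (1 + 3) / (4 + 4) = 0.5, matching the docstring example above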
def __snake_case ( __UpperCamelCase : int ):
    """simple docstring"""
    if not isinstance(__UpperCamelCase ,int ):
        raise TypeError("Input value must be an 'int' type" )
    if __UpperCamelCase < 0:
        raise ValueError("Input value must be a positive integer" )
    return bin(__UpperCamelCase ).count("1" )
if __name__ == "__main__":
import doctest
doctest.testmod() | 86 |
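The helper above reduces to Python's string-based popcount; two quick, hand-checkable cases:

assert bin(25).count("1") == 3  # 25 == 0b11001
assert bin(37).count("1") == 3  # 37 == 0b100101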
class _a :
"""simple docstring"""
def __init__( self : Union[str, Any] , UpperCAmelCase : int , UpperCAmelCase : Any , UpperCAmelCase : Dict ):
A_ = None
A_ = None
A_ = graph
self._normalize_graph(UpperCAmelCase , UpperCAmelCase )
A_ = len(UpperCAmelCase )
A_ = None
def __A ( self : Union[str, Any] , UpperCAmelCase : List[str] , UpperCAmelCase : Tuple ):
        if isinstance(sources ,int ):
            A_ = [sources]
        if isinstance(sinks ,int ):
            A_ = [sinks]
if len(UpperCAmelCase ) == 0 or len(UpperCAmelCase ) == 0:
return
A_ = sources[0]
A_ = sinks[0]
# make fake vertex if there are more
# than one source or sink
if len(UpperCAmelCase ) > 1 or len(UpperCAmelCase ) > 1:
A_ = 0
for i in sources:
max_input_flow += sum(self.graph[i] )
A_ = len(self.graph ) + 1
for room in self.graph:
room.insert(0 , 0 )
self.graph.insert(0 , [0] * size )
for i in sources:
A_ = max_input_flow
A_ = 0
A_ = len(self.graph ) + 1
for room in self.graph:
room.append(0 )
self.graph.append([0] * size )
for i in sinks:
A_ = max_input_flow
A_ = size - 1
def __A ( self : str ):
if self.maximum_flow_algorithm is None:
raise Exception("You need to set maximum flow algorithm before." )
if self.source_index is None or self.sink_index is None:
return 0
self.maximum_flow_algorithm.execute()
return self.maximum_flow_algorithm.getMaximumFlow()
def __A ( self : Tuple , UpperCAmelCase : List[Any] ):
A_ = algorithm(self )
class _a :
"""simple docstring"""
def __init__( self : Tuple , UpperCAmelCase : List[str] ):
A_ = flow_network
A_ = flow_network.verticesCount
A_ = flow_network.sourceIndex
A_ = flow_network.sinkIndex
# it's just a reference, so you shouldn't change
# it in your algorithms, use deep copy before doing that
A_ = flow_network.graph
A_ = False
def __A ( self : Optional[int] ):
if not self.executed:
self._algorithm()
A_ = True
def __A ( self : Dict ):
pass
class _a ( snake_case_ ):
"""simple docstring"""
def __init__( self : Optional[Any] , UpperCAmelCase : List[Any] ):
super().__init__(UpperCAmelCase )
# use this to save your result
A_ = -1
def __A ( self : Tuple ):
if not self.executed:
raise Exception("You should execute algorithm before using its result!" )
return self.maximum_flow
class _a ( snake_case_ ):
"""simple docstring"""
def __init__( self : Tuple , UpperCAmelCase : Union[str, Any] ):
super().__init__(UpperCAmelCase )
A_ = [[0] * self.verticies_count for i in range(self.verticies_count )]
A_ = [0] * self.verticies_count
A_ = [0] * self.verticies_count
def __A ( self : List[str] ):
A_ = self.verticies_count
# push some substance to graph
for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index] ):
self.preflow[self.source_index][nextvertex_index] += bandwidth
self.preflow[nextvertex_index][self.source_index] -= bandwidth
self.excesses[nextvertex_index] += bandwidth
# Relabel-to-front selection rule
A_ = [
i
for i in range(self.verticies_count )
if i != self.source_index and i != self.sink_index
]
# move through list
A_ = 0
while i < len(UpperCAmelCase ):
A_ = vertices_list[i]
A_ = self.heights[vertex_index]
self.process_vertex(UpperCAmelCase )
if self.heights[vertex_index] > previous_height:
# if it was relabeled, swap elements
# and start from 0 index
vertices_list.insert(0 , vertices_list.pop(UpperCAmelCase ) )
A_ = 0
else:
i += 1
A_ = sum(self.preflow[self.source_index] )
def __A ( self : List[str] , UpperCAmelCase : Dict ):
while self.excesses[vertex_index] > 0:
for neighbour_index in range(self.verticies_count ):
# if it's neighbour and current vertex is higher
if (
self.graph[vertex_index][neighbour_index]
- self.preflow[vertex_index][neighbour_index]
> 0
and self.heights[vertex_index] > self.heights[neighbour_index]
):
self.push(UpperCAmelCase , UpperCAmelCase )
self.relabel(UpperCAmelCase )
def __A ( self : Optional[Any] , UpperCAmelCase : Dict , UpperCAmelCase : str ):
A_ = min(
self.excesses[from_index] , self.graph[from_index][to_index] - self.preflow[from_index][to_index] , )
self.preflow[from_index][to_index] += preflow_delta
self.preflow[to_index][from_index] -= preflow_delta
self.excesses[from_index] -= preflow_delta
self.excesses[to_index] += preflow_delta
def __A ( self : Optional[Any] , UpperCAmelCase : List[Any] ):
A_ = None
for to_index in range(self.verticies_count ):
if (
self.graph[vertex_index][to_index]
- self.preflow[vertex_index][to_index]
> 0
) and (min_height is None or self.heights[to_index] < min_height):
A_ = self.heights[to_index]
if min_height is not None:
A_ = min_height + 1
if __name__ == "__main__":
__a :Tuple = [0]
__a :Tuple = [3]
# graph = [
# [0, 0, 4, 6, 0, 0],
# [0, 0, 5, 2, 0, 0],
# [0, 0, 0, 0, 4, 4],
# [0, 0, 0, 0, 6, 6],
# [0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0],
# ]
__a :List[str] = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]
# prepare our network
__a :List[str] = FlowNetwork(graph, entrances, exits)
# set algorithm
flow_network.set_maximum_flow_algorithm(PushRelabelExecutor)
# and calculate
__a :List[Any] = flow_network.find_maximum_flow()
print(F"maximum flow is {maximum_flow}") | 86 | 1 |
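As an independent cross-check on the sample run above (a sketch, not part of the classes): a shortest-augmenting-path (Edmonds-Karp) pass over the same capacity matrix gives 6, since 0 -> 1 -> 2 -> 3 is the only source-to-sink path and its bottleneck is min(7, 6, 8).

from collections import deque


def edmonds_karp(capacity: list[list[int]], source: int, sink: int) -> int:
    # repeatedly find a shortest augmenting path via BFS and saturate its bottleneck
    n = len(capacity)
    flow = [[0] * n for _ in range(n)]
    total = 0
    while True:
        parent = [-1] * n
        parent[source] = source
        queue = deque([source])
        while queue and parent[sink] == -1:
            u = queue.popleft()
            for v in range(n):
                if parent[v] == -1 and capacity[u][v] - flow[u][v] > 0:
                    parent[v] = u
                    queue.append(v)
        if parent[sink] == -1:  # no augmenting path is left
            break
        # walk back from the sink to find the bottleneck, then push flow along the path
        bottleneck = float("inf")
        v = sink
        while v != source:
            bottleneck = min(bottleneck, capacity[parent[v]][v] - flow[parent[v]][v])
            v = parent[v]
        v = sink
        while v != source:
            flow[parent[v]][v] += bottleneck
            flow[v][parent[v]] -= bottleneck
            v = parent[v]
        total += bottleneck
    return total


assert edmonds_karp([[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]], 0, 3) == 6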
def permute(nums: list[int]) -> list[list[int]]:
    """simple docstring"""
    result = []
    if len(nums) == 1:
        return [nums.copy()]
    for _ in range(len(nums)):
        n = nums.pop(0)
        permutations = permute(nums)
        for perm in permutations:
            perm.append(n)
        result.extend(permutations)
        nums.append(n)
    return result
def permutea(nums: list[int]) -> list[list[int]]:
    """simple docstring"""
    def backtrack(start: int) -> None:
        if start == len(nums) - 1:
            output.append(nums[:])
        else:
            for i in range(start, len(nums)):
                nums[i], nums[start] = nums[start], nums[i]
                backtrack(start + 1)
                nums[i], nums[start] = nums[start], nums[i]  # backtrack
    output = []
    backtrack(0)
    return output
if __name__ == "__main__":
    import doctest

    # use res to print the data in permute2 function
    res = permutea([1, 2, 3])
    print(res)
    doctest.testmod() | 86 |
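Both routines above should agree with itertools.permutations; a quick property check (assuming permute and permutea as defined above):

from itertools import permutations as itertools_permutations

expected = sorted(itertools_permutations([1, 2, 3]))
assert sorted(tuple(p) for p in permute([1, 2, 3])) == expected
assert sorted(tuple(p) for p in permutea([1, 2, 3])) == expected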
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__a :Dict = {'configuration_xglm': ['XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP', 'XGLMConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a :Dict = ['XGLMTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a :str = ['XGLMTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a :Tuple = [
'XGLM_PRETRAINED_MODEL_ARCHIVE_LIST',
'XGLMForCausalLM',
'XGLMModel',
'XGLMPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a :List[Any] = [
'FlaxXGLMForCausalLM',
'FlaxXGLMModel',
'FlaxXGLMPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a :Any = [
'TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFXGLMForCausalLM',
'TFXGLMModel',
'TFXGLMPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
__a :List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure) | 86 | 1 |
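A minimal sketch of the lazy-import idea behind _LazyModule (an illustration using PEP 562, not transformers' actual implementation, which also handles dir() and TYPE_CHECKING): a module-level __getattr__ in a package's __init__.py defers the submodule import until an attribute is first touched.

import importlib

_import_structure = {"configuration_xglm": ["XGLMConfig"]}
_attr_to_submodule = {
    attr: submodule for submodule, attrs in _import_structure.items() for attr in attrs
}


def __getattr__(name):  # PEP 562: only called when normal module lookup fails
    if name in _attr_to_submodule:
        module = importlib.import_module("." + _attr_to_submodule[name], __name__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")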
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
__a :int = {'configuration_swin': ['SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP', 'SwinConfig', 'SwinOnnxConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a :Optional[Any] = [
'SWIN_PRETRAINED_MODEL_ARCHIVE_LIST',
'SwinForImageClassification',
'SwinForMaskedImageModeling',
'SwinModel',
'SwinPreTrainedModel',
'SwinBackbone',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a :int = [
'TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFSwinForImageClassification',
'TFSwinForMaskedImageModeling',
'TFSwinModel',
'TFSwinPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_swin import SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinConfig, SwinOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swin import (
SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
SwinBackbone,
SwinForImageClassification,
SwinForMaskedImageModeling,
SwinModel,
SwinPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_swin import (
TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSwinForImageClassification,
TFSwinForMaskedImageModeling,
TFSwinModel,
TFSwinPreTrainedModel,
)
else:
import sys
__a :Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 86 |
# Usage:
# ./gen-card-facebook-wmt19.py
import os
from pathlib import Path
def __snake_case ( __UpperCamelCase : str ,__UpperCamelCase : str ,__UpperCamelCase : List[str] ):
"""simple docstring"""
A_ = {
"en": "Machine learning is great, isn't it?",
"ru": "Машинное обучение - это здорово, не так ли?",
"de": "Maschinelles Lernen ist großartig, oder?",
}
# BLUE scores as follows:
# "pair": [fairseq, transformers]
A_ = {
"ru-en": ["[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)", "39.20"],
"en-ru": ["[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)", "33.47"],
"en-de": ["[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)", "42.83"],
"de-en": ["[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)", "41.35"],
}
A_ = f'''{src_lang}-{tgt_lang}'''
A_ = f'''
---
language:
- {src_lang}
- {tgt_lang}
thumbnail:
tags:
- translation
- wmt19
- facebook
license: apache-2.0
datasets:
- wmt19
metrics:
- bleu
---
# FSMT
## Model description
This is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.
For more details, please see, [Facebook FAIR\'s WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).
The abbreviation FSMT stands for FairSeqMachineTranslation
All four models are available:
* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)
* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)
* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)
* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)
## Intended uses & limitations
#### How to use
```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = "facebook/wmt19-{src_lang}-{tgt_lang}"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)
input = "{texts[src_lang]}"
input_ids = tokenizer.encode(input, return_tensors="pt")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded) # {texts[tgt_lang]}
```
#### Limitations and bias
- The original (and this ported model) doesn\'t seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)
## Training data
Pretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).
## Eval results
pair | fairseq | transformers
-------|---------|----------
{pair} | {scores[pair][0]} | {scores[pair][1]}
The score is slightly below the score reported by `fairseq`, since `transformers` currently doesn\'t support:
- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).
- re-ranking
The score was calculated using this code:
```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR={pair}
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=15
mkdir -p $DATA_DIR
sacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```
note: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.
## Data Sources
- [training, etc.](http://www.statmt.org/wmt19/)
- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)
### BibTeX entry and citation info
```bibtex
@inproceedings{{...,
year={{2020}},
title={{Facebook FAIR\'s WMT19 News Translation Task Submission}},
author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},
booktitle={{Proc. of WMT}},
}}
```
## TODO
- port model ensemble (fairseq uses 4 model checkpoints)
'''
os.makedirs(__UpperCamelCase ,exist_ok=__UpperCamelCase )
A_ = os.path.join(__UpperCamelCase ,"README.md" )
print(f'''Generating {path}''' )
with open(__UpperCamelCase ,"w" ,encoding="utf-8" ) as f:
f.write(__UpperCamelCase )
# make sure we are under the root of the project
__a :Optional[Any] = Path(__file__).resolve().parent.parent.parent
__a :Optional[Any] = repo_dir / 'model_cards'
for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
__a , __a , __a :int = model_name.split('-')
__a :str = model_cards_dir / 'facebook' / model_name
write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang) | 86 | 1 |
import inspect
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
__a :List[Any] = 'src/transformers'
# This is to make sure the transformers module imported is the one in the repo.
__a :str = direct_transformers_import(PATH_TO_TRANSFORMERS)
__a :List[Any] = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
__a :List[Any] = re.compile(R'\[(.+?)\]\((https://huggingface\.co/.+?)\)')
__a :Tuple = {
'DecisionTransformerConfig',
'EncoderDecoderConfig',
'MusicgenConfig',
'RagConfig',
'SpeechEncoderDecoderConfig',
'TimmBackboneConfig',
'VisionEncoderDecoderConfig',
'VisionTextDualEncoderConfig',
'LlamaConfig',
}
def __snake_case ( __UpperCamelCase : Any ):
"""simple docstring"""
A_ = None
# source code of `config_class`
A_ = inspect.getsource(__UpperCamelCase )
A_ = _re_checkpoint.findall(__UpperCamelCase )
# Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
# For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
for ckpt_name, ckpt_link in checkpoints:
# allow the link to end with `/`
if ckpt_link.endswith("/" ):
A_ = ckpt_link[:-1]
# verify the checkpoint name corresponds to the checkpoint link
A_ = f'''https://huggingface.co/{ckpt_name}'''
if ckpt_link == ckpt_link_from_name:
A_ = ckpt_name
break
return checkpoint
def __snake_case ( ):
"""simple docstring"""
A_ = []
for config_class in list(CONFIG_MAPPING.values() ):
# Skip deprecated models
if "models.deprecated" in config_class.__module__:
continue
A_ = get_checkpoint_from_config_class(__UpperCamelCase )
A_ = config_class.__name__
if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
configs_without_checkpoint.append(__UpperCamelCase )
if len(__UpperCamelCase ) > 0:
A_ = "\n".join(sorted(__UpperCamelCase ) )
raise ValueError(f'''The following configurations don\'t contain any valid checkpoint:\n{message}''' )
if __name__ == "__main__":
check_config_docstrings_have_checkpoints() | 86 |
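The checkpoint regex above can be exercised in isolation; a small sketch against a representative docstring fragment (the sentence is made up, the checkpoint is a real one):

import re

_re_checkpoint = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")
fragment = "Instantiating a configuration with the defaults yields the [bert-base-uncased](https://huggingface.co/bert-base-uncased) architecture."
assert _re_checkpoint.findall(fragment) == [
    ("bert-base-uncased", "https://huggingface.co/bert-base-uncased")
]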
from ..utils import DummyObject, requires_backends
class _a ( metaclass=snake_case_ ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = ['torch', 'transformers', 'onnx']
def __init__( self : str , *UpperCAmelCase : int , **UpperCAmelCase : List[Any] ):
requires_backends(self , ["torch", "transformers", "onnx"] )
@classmethod
def __A ( cls : Dict , *UpperCAmelCase : Dict , **UpperCAmelCase : Union[str, Any] ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
@classmethod
def __A ( cls : Optional[Any] , *UpperCAmelCase : List[Any] , **UpperCAmelCase : Union[str, Any] ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
class _a ( metaclass=snake_case_ ):
"""simple docstring"""
_lowerCamelCase : str = ['torch', 'transformers', 'onnx']
def __init__( self : Optional[int] , *UpperCAmelCase : List[str] , **UpperCAmelCase : int ):
requires_backends(self , ["torch", "transformers", "onnx"] )
@classmethod
def __A ( cls : List[str] , *UpperCAmelCase : List[str] , **UpperCAmelCase : List[Any] ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
@classmethod
def __A ( cls : Optional[Any] , *UpperCAmelCase : Dict , **UpperCAmelCase : Optional[Any] ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
class _a ( metaclass=snake_case_ ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = ['torch', 'transformers', 'onnx']
def __init__( self : Union[str, Any] , *UpperCAmelCase : Any , **UpperCAmelCase : Dict ):
requires_backends(self , ["torch", "transformers", "onnx"] )
@classmethod
def __A ( cls : Tuple , *UpperCAmelCase : Dict , **UpperCAmelCase : str ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
@classmethod
def __A ( cls : Dict , *UpperCAmelCase : List[str] , **UpperCAmelCase : List[str] ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
class _a ( metaclass=snake_case_ ):
"""simple docstring"""
_lowerCamelCase : int = ['torch', 'transformers', 'onnx']
def __init__( self : List[str] , *UpperCAmelCase : Dict , **UpperCAmelCase : Optional[Any] ):
requires_backends(self , ["torch", "transformers", "onnx"] )
@classmethod
def __A ( cls : Any , *UpperCAmelCase : Union[str, Any] , **UpperCAmelCase : Dict ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
@classmethod
def __A ( cls : Optional[Any] , *UpperCAmelCase : int , **UpperCAmelCase : Optional[int] ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
class _a ( metaclass=snake_case_ ):
"""simple docstring"""
_lowerCamelCase : Dict = ['torch', 'transformers', 'onnx']
def __init__( self : List[str] , *UpperCAmelCase : str , **UpperCAmelCase : int ):
requires_backends(self , ["torch", "transformers", "onnx"] )
@classmethod
def __A ( cls : List[str] , *UpperCAmelCase : Optional[int] , **UpperCAmelCase : Optional[int] ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
@classmethod
def __A ( cls : Optional[int] , *UpperCAmelCase : int , **UpperCAmelCase : List[Any] ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
class _a ( metaclass=snake_case_ ):
"""simple docstring"""
_lowerCamelCase : int = ['torch', 'transformers', 'onnx']
def __init__( self : Tuple , *UpperCAmelCase : List[Any] , **UpperCAmelCase : Optional[Any] ):
requires_backends(self , ["torch", "transformers", "onnx"] )
@classmethod
def __A ( cls : Optional[int] , *UpperCAmelCase : Dict , **UpperCAmelCase : str ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
@classmethod
def __A ( cls : Optional[int] , *UpperCAmelCase : Optional[Any] , **UpperCAmelCase : int ):
requires_backends(cls , ["torch", "transformers", "onnx"] ) | 86 | 1 |
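A hand-rolled sketch of what the dummy classes above accomplish (the helper and class names here are made up; the real logic lives in transformers.utils.requires_backends): fail loudly at first use when the optional backends are absent.

import importlib.util


def _requires_backends(obj, backends):
    # hypothetical stand-in for transformers.utils.requires_backends
    missing = [b for b in backends if importlib.util.find_spec(b) is None]
    if missing:
        name = obj.__name__ if isinstance(obj, type) else type(obj).__name__
        raise ImportError(f"{name} requires the missing backend(s): {', '.join(missing)}")


class OnnxDummy:
    # hypothetical stand-in for the generated dummy classes above
    def __init__(self, *args, **kwargs):
        _requires_backends(self, ["torch", "transformers", "onnx"])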
import re
from filelock import FileLock
try:
import nltk
__a :List[Any] = True
except (ImportError, ModuleNotFoundError):
__a :List[str] = False
if NLTK_AVAILABLE:
with FileLock('.lock') as lock:
nltk.download('punkt', quiet=True)
def __snake_case ( __UpperCamelCase : str ):
"""simple docstring"""
    stripped = re.sub("<n>" ,"" ,__UpperCamelCase )  # remove pegasus newline char (re.sub returns a new string)
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(stripped ) ) | 86 |
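Usage sketch for the sentence splitter above (assuming nltk's punkt data has been downloaded, as the FileLock block arranges):

import nltk

text = "ROUGE-Lsum is newline sensitive. So each sentence must land on its own line."
print("\n".join(nltk.sent_tokenize(text)))
# ROUGE-Lsum is newline sensitive.
# So each sentence must land on its own line.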
import torch
from diffusers import DDPMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class _a ( snake_case_ ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = (DDPMParallelScheduler,)
def __A ( self : List[Any] , **UpperCAmelCase : Optional[int] ):
A_ = {
"num_train_timesteps": 1000,
"beta_start": 0.0_001,
"beta_end": 0.02,
"beta_schedule": "linear",
"variance_type": "fixed_small",
"clip_sample": True,
}
config.update(**UpperCAmelCase )
return config
def __A ( self : Optional[Any] ):
for timesteps in [1, 5, 100, 1000]:
self.check_over_configs(num_train_timesteps=UpperCAmelCase )
def __A ( self : Dict ):
for beta_start, beta_end in zip([0.0_001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=UpperCAmelCase , beta_end=UpperCAmelCase )
def __A ( self : int ):
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=UpperCAmelCase )
def __A ( self : Tuple ):
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=UpperCAmelCase )
def __A ( self : int ):
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=UpperCAmelCase )
def __A ( self : Union[str, Any] ):
self.check_over_configs(thresholding=UpperCAmelCase )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=UpperCAmelCase , prediction_type=UpperCAmelCase , sample_max_value=UpperCAmelCase , )
def __A ( self : Optional[int] ):
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=UpperCAmelCase )
def __A ( self : Tuple ):
for t in [0, 500, 999]:
self.check_over_forward(time_step=UpperCAmelCase )
def __A ( self : Tuple ):
A_ = self.scheduler_classes[0]
A_ = self.get_scheduler_config()
A_ = scheduler_class(**UpperCAmelCase )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.00_979 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.02 ) ) < 1E-5
def __A ( self : List[Any] ):
A_ = self.scheduler_classes[0]
A_ = self.get_scheduler_config()
A_ = scheduler_class(**UpperCAmelCase )
A_ = len(UpperCAmelCase )
A_ = self.dummy_model()
A_ = self.dummy_sample_deter
A_ = self.dummy_sample_deter + 0.1
A_ = self.dummy_sample_deter - 0.1
A_ = samplea.shape[0]
A_ = torch.stack([samplea, samplea, samplea] , dim=0 )
A_ = torch.arange(UpperCAmelCase )[0:3, None].repeat(1 , UpperCAmelCase )
A_ = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) )
A_ = scheduler.batch_step_no_noise(UpperCAmelCase , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) )
A_ = torch.sum(torch.abs(UpperCAmelCase ) )
A_ = torch.mean(torch.abs(UpperCAmelCase ) )
assert abs(result_sum.item() - 1_153.1_833 ) < 1E-2
assert abs(result_mean.item() - 0.5_005 ) < 1E-3
def __A ( self : Tuple ):
A_ = self.scheduler_classes[0]
A_ = self.get_scheduler_config()
A_ = scheduler_class(**UpperCAmelCase )
A_ = len(UpperCAmelCase )
A_ = self.dummy_model()
A_ = self.dummy_sample_deter
A_ = torch.manual_seed(0 )
for t in reversed(range(UpperCAmelCase ) ):
# 1. predict noise residual
A_ = model(UpperCAmelCase , UpperCAmelCase )
# 2. predict previous mean of sample x_t-1
A_ = scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , generator=UpperCAmelCase ).prev_sample
A_ = pred_prev_sample
A_ = torch.sum(torch.abs(UpperCAmelCase ) )
A_ = torch.mean(torch.abs(UpperCAmelCase ) )
assert abs(result_sum.item() - 258.9_606 ) < 1E-2
assert abs(result_mean.item() - 0.3_372 ) < 1E-3
def __A ( self : Tuple ):
A_ = self.scheduler_classes[0]
A_ = self.get_scheduler_config(prediction_type="v_prediction" )
A_ = scheduler_class(**UpperCAmelCase )
A_ = len(UpperCAmelCase )
A_ = self.dummy_model()
A_ = self.dummy_sample_deter
A_ = torch.manual_seed(0 )
for t in reversed(range(UpperCAmelCase ) ):
# 1. predict noise residual
A_ = model(UpperCAmelCase , UpperCAmelCase )
# 2. predict previous mean of sample x_t-1
A_ = scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , generator=UpperCAmelCase ).prev_sample
A_ = pred_prev_sample
A_ = torch.sum(torch.abs(UpperCAmelCase ) )
A_ = torch.mean(torch.abs(UpperCAmelCase ) )
assert abs(result_sum.item() - 202.0_296 ) < 1E-2
assert abs(result_mean.item() - 0.2_631 ) < 1E-3
def __A ( self : Union[str, Any] ):
A_ = self.scheduler_classes[0]
A_ = self.get_scheduler_config()
A_ = scheduler_class(**UpperCAmelCase )
A_ = [100, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=UpperCAmelCase )
A_ = scheduler.timesteps
for i, timestep in enumerate(UpperCAmelCase ):
if i == len(UpperCAmelCase ) - 1:
A_ = -1
else:
A_ = timesteps[i + 1]
A_ = scheduler.previous_timestep(UpperCAmelCase )
A_ = prev_t.item()
self.assertEqual(UpperCAmelCase , UpperCAmelCase )
def __A ( self : List[Any] ):
A_ = self.scheduler_classes[0]
A_ = self.get_scheduler_config()
A_ = scheduler_class(**UpperCAmelCase )
A_ = [100, 87, 50, 51, 0]
with self.assertRaises(UpperCAmelCase , msg="`custom_timesteps` must be in descending order." ):
scheduler.set_timesteps(timesteps=UpperCAmelCase )
def __A ( self : List[Any] ):
A_ = self.scheduler_classes[0]
A_ = self.get_scheduler_config()
A_ = scheduler_class(**UpperCAmelCase )
A_ = [100, 87, 50, 1, 0]
A_ = len(UpperCAmelCase )
with self.assertRaises(UpperCAmelCase , msg="Can only pass one of `num_inference_steps` or `custom_timesteps`." ):
scheduler.set_timesteps(num_inference_steps=UpperCAmelCase , timesteps=UpperCAmelCase )
def __A ( self : Optional[Any] ):
A_ = self.scheduler_classes[0]
A_ = self.get_scheduler_config()
A_ = scheduler_class(**UpperCAmelCase )
A_ = [scheduler.config.num_train_timesteps]
with self.assertRaises(
            UpperCAmelCase , msg=f"`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}" , ):
scheduler.set_timesteps(timesteps=UpperCAmelCase ) | 86 | 1 |
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
__a :Any = ''
__a :int = ''
__a :str = ''
__a :Optional[Any] = 1 # (0 is vertical, 1 is horizontal)
def __snake_case ( ):
"""simple docstring"""
A_ , A_ = get_dataset(__UpperCamelCase ,__UpperCamelCase )
print("Processing..." )
A_ , A_ , A_ = update_image_and_anno(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase )
for index, image in enumerate(__UpperCamelCase ):
# Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
A_ = random_chars(32 )
A_ = paths[index].split(os.sep )[-1].rsplit("." ,1 )[0]
A_ = f'''{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}'''
cva.imwrite(f'''/{file_root}.jpg''' ,__UpperCamelCase ,[cva.IMWRITE_JPEG_QUALITY, 85] )
print(f'''Success {index+1}/{len(__UpperCamelCase )} with {file_name}''' )
A_ = []
for anno in new_annos[index]:
A_ = f'''{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}'''
annos_list.append(__UpperCamelCase )
with open(f'''/{file_root}.txt''' ,"w" ) as outfile:
outfile.write("\n".join(line for line in annos_list ) )
def __snake_case ( __UpperCamelCase : str ,__UpperCamelCase : str ):
"""simple docstring"""
A_ = []
A_ = []
for label_file in glob.glob(os.path.join(__UpperCamelCase ,"*.txt" ) ):
A_ = label_file.split(os.sep )[-1].rsplit("." ,1 )[0]
with open(__UpperCamelCase ) as in_file:
A_ = in_file.readlines()
A_ = os.path.join(__UpperCamelCase ,f'''{label_name}.jpg''' )
A_ = []
for obj_list in obj_lists:
A_ = obj_list.rstrip("\n" ).split(" " )
boxes.append(
[
int(obj[0] ),
float(obj[1] ),
float(obj[2] ),
float(obj[3] ),
float(obj[4] ),
] )
if not boxes:
continue
img_paths.append(__UpperCamelCase )
labels.append(__UpperCamelCase )
return img_paths, labels
def __snake_case ( __UpperCamelCase : list ,__UpperCamelCase : list ,__UpperCamelCase : int = 1 ):
"""simple docstring"""
A_ = []
A_ = []
A_ = []
for idx in range(len(__UpperCamelCase ) ):
A_ = []
A_ = img_list[idx]
path_list.append(__UpperCamelCase )
A_ = anno_list[idx]
A_ = cva.imread(__UpperCamelCase )
if flip_type == 1:
A_ = cva.flip(__UpperCamelCase ,__UpperCamelCase )
for bbox in img_annos:
A_ = 1 - bbox[1]
new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]] )
elif flip_type == 0:
A_ = cva.flip(__UpperCamelCase ,__UpperCamelCase )
for bbox in img_annos:
A_ = 1 - bbox[2]
new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]] )
new_annos_lists.append(__UpperCamelCase )
new_imgs_list.append(__UpperCamelCase )
return new_imgs_list, new_annos_lists, path_list
def __snake_case ( __UpperCamelCase : int = 32 ):
"""simple docstring"""
assert number_char > 1, "The number of character should greater than 1"
A_ = ascii_lowercase + digits
return "".join(random.choice(__UpperCamelCase ) for _ in range(__UpperCamelCase ) )
if __name__ == "__main__":
main()
print('DONE ✅') | 86 |
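The box arithmetic above follows from YOLO's normalized label format (class_id, x_center, y_center, width, height, all in [0, 1]): a horizontal mirror maps x_center to 1 - x_center and a vertical mirror maps y_center to 1 - y_center, with widths and heights unchanged. A self-contained check, no OpenCV required:

def flip_bbox(bbox: list, flip_type: int) -> list:
    # bbox = [class_id, x_center, y_center, width, height], coordinates normalized to [0, 1]
    class_id, x_center, y_center, width, height = bbox
    if flip_type == 1:  # horizontal mirror
        x_center = 1 - x_center
    elif flip_type == 0:  # vertical mirror
        y_center = 1 - y_center
    return [class_id, x_center, y_center, width, height]


assert flip_bbox([0, 0.25, 0.5, 0.2, 0.4], flip_type=1) == [0, 0.75, 0.5, 0.2, 0.4]
assert flip_bbox([0, 0.25, 0.25, 0.2, 0.4], flip_type=0) == [0, 0.25, 0.75, 0.2, 0.4]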
import argparse
import json
import os
import torch
from transformers import LukeConfig, LukeModel, LukeTokenizer, RobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def __snake_case ( __UpperCamelCase : Tuple ,__UpperCamelCase : Dict ,__UpperCamelCase : Union[str, Any] ,__UpperCamelCase : List[Any] ,__UpperCamelCase : List[Any] ):
"""simple docstring"""
with open(__UpperCamelCase ) as metadata_file:
A_ = json.load(__UpperCamelCase )
A_ = LukeConfig(use_entity_aware_attention=__UpperCamelCase ,**metadata["model_config"] )
# Load in the weights from the checkpoint_path
A_ = torch.load(__UpperCamelCase ,map_location="cpu" )
# Load the entity vocab file
A_ = load_entity_vocab(__UpperCamelCase )
A_ = RobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"] )
# Add special tokens to the token vocabulary for downstream tasks
A_ = AddedToken("<ent>" ,lstrip=__UpperCamelCase ,rstrip=__UpperCamelCase )
A_ = AddedToken("<ent2>" ,lstrip=__UpperCamelCase ,rstrip=__UpperCamelCase )
tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(f'''Saving tokenizer to {pytorch_dump_folder_path}''' )
tokenizer.save_pretrained(__UpperCamelCase )
with open(os.path.join(__UpperCamelCase ,LukeTokenizer.vocab_files_names["entity_vocab_file"] ) ,"w" ) as f:
json.dump(__UpperCamelCase ,__UpperCamelCase )
A_ = LukeTokenizer.from_pretrained(__UpperCamelCase )
# Initialize the embeddings of the special tokens
A_ = state_dict["embeddings.word_embeddings.weight"]
A_ = word_emb[tokenizer.convert_tokens_to_ids(["@"] )[0]].unsqueeze(0 )
A_ = word_emb[tokenizer.convert_tokens_to_ids(["#"] )[0]].unsqueeze(0 )
A_ = torch.cat([word_emb, ent_emb, enta_emb] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
A_ = f'''encoder.layer.{layer_index}.attention.self.'''
A_ = state_dict[prefix + matrix_name]
A_ = state_dict[prefix + matrix_name]
A_ = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
A_ = state_dict["entity_embeddings.entity_embeddings.weight"]
A_ = entity_emb[entity_vocab["[MASK]"]]
A_ = LukeModel(config=__UpperCamelCase ).eval()
A_ , A_ = model.load_state_dict(__UpperCamelCase ,strict=__UpperCamelCase )
if not (len(__UpperCamelCase ) == 1 and missing_keys[0] == "embeddings.position_ids"):
raise ValueError(f'''Missing keys {", ".join(__UpperCamelCase )}. Expected only missing embeddings.position_ids''' )
if not (all(key.startswith("entity_predictions" ) or key.startswith("lm_head" ) for key in unexpected_keys )):
raise ValueError(
"Unexpected keys"
f''' {", ".join([key for key in unexpected_keys if not (key.startswith("entity_predictions" ) or key.startswith("lm_head" ))] )}''' )
# Check outputs
A_ = LukeTokenizer.from_pretrained(__UpperCamelCase ,task="entity_classification" )
A_ = (
"Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped the"
" new world number one avoid a humiliating second- round exit at Wimbledon ."
)
A_ = (39, 42)
A_ = tokenizer(__UpperCamelCase ,entity_spans=[span] ,add_prefix_space=__UpperCamelCase ,return_tensors="pt" )
A_ = model(**__UpperCamelCase )
# Verify word hidden states
if model_size == "large":
A_ = torch.Size((1, 42, 1024) )
A_ = torch.tensor(
[[0.0133, 0.0865, 0.0095], [0.3093, -0.2576, -0.7418], [-0.1720, -0.2117, -0.2869]] )
else: # base
A_ = torch.Size((1, 42, 768) )
A_ = torch.tensor([[0.0037, 0.1368, -0.0091], [0.1099, 0.3329, -0.1095], [0.0765, 0.5335, 0.1179]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
f'''Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}''' )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] ,__UpperCamelCase ,atol=1E-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
A_ = torch.Size((1, 1, 1024) )
A_ = torch.tensor([[0.0466, -0.0106, -0.0179]] )
else: # base
A_ = torch.Size((1, 1, 768) )
A_ = torch.tensor([[0.1457, 0.1044, 0.0174]] )
    if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
f'''Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is'''
f''' {expected_shape}''' )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] ,__UpperCamelCase ,atol=1E-4 ):
raise ValueError
# Finally, save our PyTorch model and tokenizer
print("Saving PyTorch model to {}".format(__UpperCamelCase ) )
model.save_pretrained(__UpperCamelCase )
def __snake_case ( __UpperCamelCase : str ):
"""simple docstring"""
A_ = {}
with open(__UpperCamelCase ,"r" ,encoding="utf-8" ) as f:
for index, line in enumerate(__UpperCamelCase ):
A_ , A_ = line.rstrip().split("\t" )
A_ = index
return entity_vocab
if __name__ == "__main__":
__a :Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--checkpoint_path', type=str, help='Path to a pytorch_model.bin file.')
parser.add_argument(
'--metadata_path', default=None, type=str, help='Path to a metadata.json file, defining the configuration.'
)
parser.add_argument(
'--entity_vocab_path',
default=None,
type=str,
help='Path to an entity_vocab.tsv file, containing the entity vocabulary.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to where to dump the output PyTorch model.'
)
parser.add_argument(
'--model_size', default='base', type=str, choices=['base', 'large'], help='Size of the model to be converted.'
)
__a :Tuple = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
) | 86 | 1 |
def counting_sort(collection: list) -> list:
    """simple docstring"""
    if collection == []:
        return []
    # get some information about the collection
    coll_len = len(collection)
    coll_max = max(collection)
    coll_min = min(collection)
    # create the counting array
    counting_arr_length = coll_max + 1 - coll_min
    counting_arr = [0] * counting_arr_length
    # count how often each value appears in the collection
    for number in collection:
        counting_arr[number - coll_min] += 1
    # sum each position with its predecessors. now, counting_arr[i] tells
    # us how many elements <= i there are in the collection
    for i in range(1, counting_arr_length):
        counting_arr[i] = counting_arr[i] + counting_arr[i - 1]
    # create the output collection
    ordered = [0] * coll_len
    # place the elements in the output, respecting the original order (stable
    # sort) from end to begin, updating counting_arr
    for i in reversed(range(0, coll_len)):
        ordered[counting_arr[collection[i] - coll_min] - 1] = collection[i]
        counting_arr[collection[i] - coll_min] -= 1
    return ordered
def counting_sort_string(string: str) -> str:
    """simple docstring"""
    return "".join(chr(i) for i in counting_sort([ord(c) for c in string]))
if __name__ == "__main__":
    # Test string sort
    assert counting_sort_string('thisisthestring') == "eghhiiinrsssttt"
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item) for item in user_input.split(',')]
    print(counting_sort(unsorted)) | 86 |
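A quick property check for the sort above against the built-in sort (random input, including negatives, which the coll_min offset is there to handle):

import random

sample = [random.randint(-50, 50) for _ in range(200)]
assert counting_sort(sample) == sorted(sample)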
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
__a :Optional[Any] = 'true'
def __snake_case ( __UpperCamelCase : Any ,__UpperCamelCase : List[Any]=82 ,__UpperCamelCase : Dict=16 ):
"""simple docstring"""
set_seed(42 )
A_ = RegressionModel()
A_ = deepcopy(__UpperCamelCase )
A_ = RegressionDataset(length=__UpperCamelCase )
A_ = DataLoader(__UpperCamelCase ,batch_size=__UpperCamelCase )
model.to(accelerator.device )
A_ , A_ = accelerator.prepare(__UpperCamelCase ,__UpperCamelCase )
return model, ddp_model, dataloader
def __snake_case ( __UpperCamelCase : Accelerator ,__UpperCamelCase : Dict=False ):
"""simple docstring"""
A_ = AutoTokenizer.from_pretrained("hf-internal-testing/mrpc-bert-base-cased" )
A_ = load_dataset("glue" ,"mrpc" ,split="validation" )
def tokenize_function(__UpperCamelCase : Optional[Any] ):
A_ = tokenizer(examples["sentence1"] ,examples["sentence2"] ,truncation=__UpperCamelCase ,max_length=__UpperCamelCase )
return outputs
with accelerator.main_process_first():
A_ = dataset.map(
__UpperCamelCase ,batched=__UpperCamelCase ,remove_columns=["idx", "sentence1", "sentence2"] ,)
A_ = tokenized_datasets.rename_column("label" ,"labels" )
def collate_fn(__UpperCamelCase : Union[str, Any] ):
if use_longest:
return tokenizer.pad(__UpperCamelCase ,padding="longest" ,return_tensors="pt" )
return tokenizer.pad(__UpperCamelCase ,padding="max_length" ,max_length=128 ,return_tensors="pt" )
return DataLoader(__UpperCamelCase ,shuffle=__UpperCamelCase ,collate_fn=__UpperCamelCase ,batch_size=16 )
def __snake_case ( __UpperCamelCase : Optional[Any] ,__UpperCamelCase : str ):
"""simple docstring"""
A_ = Accelerator(dispatch_batches=__UpperCamelCase ,split_batches=__UpperCamelCase )
A_ = get_dataloader(__UpperCamelCase ,not dispatch_batches )
A_ = AutoModelForSequenceClassification.from_pretrained(
"hf-internal-testing/mrpc-bert-base-cased" ,return_dict=__UpperCamelCase )
A_ , A_ = accelerator.prepare(__UpperCamelCase ,__UpperCamelCase )
return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def __snake_case ( __UpperCamelCase : Dict ,__UpperCamelCase : int ,__UpperCamelCase : Optional[Any] ):
"""simple docstring"""
A_ = []
for batch in dataloader:
A_ , A_ = batch.values()
with torch.no_grad():
A_ = model(__UpperCamelCase )
A_ , A_ = accelerator.gather_for_metrics((logit, target) )
logits_and_targets.append((logit, target) )
A_ , A_ = [], []
for logit, targ in logits_and_targets:
logits.append(__UpperCamelCase )
targs.append(__UpperCamelCase )
A_ , A_ = torch.cat(__UpperCamelCase ), torch.cat(__UpperCamelCase )
return logits, targs
def __snake_case ( __UpperCamelCase : Accelerator ,__UpperCamelCase : Dict=82 ,__UpperCamelCase : List[Any]=False ,__UpperCamelCase : Dict=False ,__UpperCamelCase : Optional[int]=16 ):
"""simple docstring"""
A_ , A_ , A_ = get_basic_setup(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase )
A_ , A_ = generate_predictions(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase )
assert (
len(__UpperCamelCase ) == num_samples
), f'''Unexpected number of inputs:\n Expected: {num_samples}\n Actual: {len(__UpperCamelCase )}'''
def __snake_case ( __UpperCamelCase : bool = False ,__UpperCamelCase : bool = False ):
"""simple docstring"""
A_ = evaluate.load("glue" ,"mrpc" )
A_ , A_ = get_mrpc_setup(__UpperCamelCase ,__UpperCamelCase )
# First do baseline
A_ , A_ , A_ = setup["no"]
model.to(__UpperCamelCase )
model.eval()
for batch in dataloader:
batch.to(__UpperCamelCase )
with torch.inference_mode():
A_ = model(**__UpperCamelCase )
A_ = outputs.logits.argmax(dim=-1 )
metric.add_batch(predictions=__UpperCamelCase ,references=batch["labels"] )
A_ = metric.compute()
# Then do distributed
A_ , A_ , A_ = setup["ddp"]
model.eval()
for batch in dataloader:
with torch.inference_mode():
A_ = model(**__UpperCamelCase )
A_ = outputs.logits.argmax(dim=-1 )
A_ = batch["labels"]
A_ , A_ = accelerator.gather_for_metrics((preds, references) )
metric.add_batch(predictions=__UpperCamelCase ,references=__UpperCamelCase )
A_ = metric.compute()
for key in "accuracy f1".split():
assert math.isclose(
baseline[key] ,distributed[key] ), f'''Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n'''
def __snake_case ( ):
"""simple docstring"""
A_ = Accelerator(split_batches=__UpperCamelCase ,dispatch_batches=__UpperCamelCase )
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_warning()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# These are a bit slower so they should only be ran on the GPU or TPU
if torch.cuda.is_available() or is_tpu_available():
if accelerator.is_local_main_process:
print("**Testing gather_for_metrics**" )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
if accelerator.is_local_main_process:
print(f'''With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`''' )
test_mrpc(__UpperCamelCase ,__UpperCamelCase )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print("**Test torch metrics**" )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
A_ = Accelerator(split_batches=__UpperCamelCase ,dispatch_batches=__UpperCamelCase )
if accelerator.is_local_main_process:
print(f'''With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99''' )
test_torch_metrics(__UpperCamelCase ,99 )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print("**Test last batch is not dropped when perfectly divisible**" )
A_ = Accelerator()
test_torch_metrics(__UpperCamelCase ,512 )
accelerator.state._reset_state()
def __snake_case ( __UpperCamelCase : List[Any] ):
"""simple docstring"""
main()
if __name__ == "__main__":
main() | 86 | 1 |
import argparse
import requests
import torch
from PIL import Image
from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel
def __snake_case ( __UpperCamelCase : Optional[int] ):
"""simple docstring"""
if "img_encoder.pos_embed" in name:
A_ = name.replace("img_encoder.pos_embed" ,"vision_model.embeddings.position_embeddings" )
if "img_encoder.patch_embed.proj" in name:
A_ = name.replace("img_encoder.patch_embed.proj" ,"vision_model.embeddings.patch_embeddings.projection" )
if "img_encoder.patch_embed.norm" in name:
A_ = name.replace("img_encoder.patch_embed.norm" ,"vision_model.embeddings.layernorm" )
if "img_encoder.layers" in name:
A_ = name.replace("img_encoder.layers" ,"vision_model.encoder.stages" )
if "blocks" in name and "res" not in name:
A_ = name.replace("blocks" ,"layers" )
if "attn" in name and "pre_assign" not in name:
A_ = name.replace("attn" ,"self_attn" )
if "proj" in name and "self_attn" in name and "text" not in name:
A_ = name.replace("proj" ,"out_proj" )
if "pre_assign_attn.attn.proj" in name:
A_ = name.replace("pre_assign_attn.attn.proj" ,"pre_assign_attn.attn.out_proj" )
if "norm1" in name:
A_ = name.replace("norm1" ,"layer_norm1" )
if "norm2" in name and "pre_assign" not in name:
A_ = name.replace("norm2" ,"layer_norm2" )
if "img_encoder.norm" in name:
A_ = name.replace("img_encoder.norm" ,"vision_model.layernorm" )
# text encoder
if "text_encoder.token_embedding" in name:
A_ = name.replace("text_encoder.token_embedding" ,"text_model.embeddings.token_embedding" )
if "text_encoder.positional_embedding" in name:
A_ = name.replace("text_encoder.positional_embedding" ,"text_model.embeddings.position_embedding.weight" )
if "text_encoder.transformer.resblocks." in name:
A_ = name.replace("text_encoder.transformer.resblocks." ,"text_model.encoder.layers." )
if "ln_1" in name:
A_ = name.replace("ln_1" ,"layer_norm1" )
if "ln_2" in name:
A_ = name.replace("ln_2" ,"layer_norm2" )
if "c_fc" in name:
A_ = name.replace("c_fc" ,"fc1" )
if "c_proj" in name:
A_ = name.replace("c_proj" ,"fc2" )
if "text_encoder" in name:
A_ = name.replace("text_encoder" ,"text_model" )
if "ln_final" in name:
A_ = name.replace("ln_final" ,"final_layer_norm" )
# projection layers
if "img_projector.linear_hidden." in name:
A_ = name.replace("img_projector.linear_hidden." ,"visual_projection." )
if "img_projector.linear_out." in name:
A_ = name.replace("img_projector.linear_out." ,"visual_projection.3." )
if "text_projector.linear_hidden" in name:
A_ = name.replace("text_projector.linear_hidden" ,"text_projection" )
if "text_projector.linear_out" in name:
A_ = name.replace("text_projector.linear_out" ,"text_projection.3" )
return name
def __snake_case ( __UpperCamelCase : List[str] ,__UpperCamelCase : List[Any] ):
"""simple docstring"""
for key in orig_state_dict.copy().keys():
A_ = orig_state_dict.pop(__UpperCamelCase )
if "qkv" in key:
# weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
A_ = key.split("." )
A_ , A_ = int(key_split[2] ), int(key_split[4] )
A_ = config.vision_config.hidden_size
if "weight" in key:
A_ = val[:dim, :]
A_ = val[dim : dim * 2, :]
A_ = val[-dim:, :]
else:
A_ = val[:dim]
A_ = val[dim : dim * 2]
A_ = val[-dim:]
elif "in_proj" in key:
# weights and biases of the key, value and query projections of text encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
A_ = key.split("." )
A_ = int(key_split[3] )
A_ = config.text_config.hidden_size
if "weight" in key:
A_ = val[:dim, :]
A_ = val[
dim : dim * 2, :
]
A_ = val[-dim:, :]
else:
A_ = val[:dim]
A_ = val[dim : dim * 2]
A_ = val[-dim:]
else:
A_ = rename_key(__UpperCamelCase )
# squeeze if necessary
if (
"text_projection.0" in new_name
or "text_projection.3" in new_name
or "visual_projection.0" in new_name
or "visual_projection.3" in new_name
):
A_ = val.squeeze_()
else:
A_ = val
return orig_state_dict
def __snake_case ( ):
"""simple docstring"""
A_ = "http://images.cocodataset.org/val2017/000000039769.jpg"
A_ = Image.open(requests.get(__UpperCamelCase ,stream=__UpperCamelCase ).raw )
return im
@torch.no_grad()
def __snake_case ( __UpperCamelCase : Optional[int] ,__UpperCamelCase : Union[str, Any] ,__UpperCamelCase : Optional[int]="groupvit-gcc-yfcc" ,__UpperCamelCase : List[Any]=False ):
"""simple docstring"""
A_ = GroupViTConfig()
A_ = GroupViTModel(__UpperCamelCase ).eval()
A_ = torch.load(__UpperCamelCase ,map_location="cpu" )["model"]
A_ = convert_state_dict(__UpperCamelCase ,__UpperCamelCase )
A_ , A_ = model.load_state_dict(__UpperCamelCase ,strict=__UpperCamelCase )
assert missing_keys == ["text_model.embeddings.position_ids"]
assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(__UpperCamelCase ) == 0)
# verify result
A_ = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32" )
A_ = prepare_img()
A_ = processor(text=["a photo of a cat", "a photo of a dog"] ,images=__UpperCamelCase ,padding=__UpperCamelCase ,return_tensors="pt" )
with torch.no_grad():
A_ = model(**__UpperCamelCase )
if model_name == "groupvit-gcc-yfcc":
A_ = torch.tensor([[13.3523, 6.3629]] )
elif model_name == "groupvit-gcc-redcaps":
A_ = torch.tensor([[16.1873, 8.6230]] )
else:
raise ValueError(f'''Model name {model_name} not supported.''' )
assert torch.allclose(outputs.logits_per_image ,__UpperCamelCase ,atol=1E-3 )
processor.save_pretrained(__UpperCamelCase )
model.save_pretrained(__UpperCamelCase )
print("Successfully saved processor and model to" ,__UpperCamelCase )
if push_to_hub:
print("Pushing to the hub..." )
processor.push_to_hub(__UpperCamelCase ,organization="nielsr" )
model.push_to_hub(__UpperCamelCase ,organization="nielsr" )
if __name__ == "__main__":
__a :Tuple = argparse.ArgumentParser()
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to dump the processor and PyTorch model.'
)
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to GroupViT checkpoint')
parser.add_argument(
'--model_name',
        default='groupvit-gcc-yfcc',
type=str,
help='Name of the model. Expecting either \'groupvit-gcc-yfcc\' or \'groupvit-gcc-redcaps\'',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.',
)
__a :int = parser.parse_args()
convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub) | 86 |
import argparse
import os
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
__a :Optional[Any] = 'src/transformers'
__a :Tuple = 'docs/source/en/tasks'
def __snake_case ( __UpperCamelCase : List[Any] ,__UpperCamelCase : Dict ,__UpperCamelCase : int ):
"""simple docstring"""
with open(__UpperCamelCase ,"r" ,encoding="utf-8" ,newline="\n" ) as f:
A_ = f.readlines()
# Find the start prompt.
A_ = 0
while not lines[start_index].startswith(__UpperCamelCase ):
start_index += 1
start_index += 1
A_ = start_index
while not lines[end_index].startswith(__UpperCamelCase ):
end_index += 1
end_index -= 1
while len(lines[start_index] ) <= 1:
start_index += 1
while len(lines[end_index] ) <= 1:
end_index -= 1
end_index += 1
return "".join(lines[start_index:end_index] ), start_index, end_index, lines
# This is to make sure the transformers module imported is the one in the repo.
__a :List[str] = direct_transformers_import(TRANSFORMERS_PATH)
__a :Optional[Any] = {
'asr.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
'audio_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
'language_modeling.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
'image_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
'masked_language_modeling.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
'multiple_choice.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
'object_detection.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
'question_answering.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
'semantic_segmentation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
'sequence_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
'summarization.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'token_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
'translation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'video_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
'document_question_answering.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
'monocular_depth_estimation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}
# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
__a :Optional[Any] = {
'summarization.md': ('nllb',),
'translation.md': ('nllb',),
}
def __snake_case ( __UpperCamelCase : Tuple ):
"""simple docstring"""
A_ = TASK_GUIDE_TO_MODELS[task_guide]
A_ = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(__UpperCamelCase ,set() )
A_ = {
code: name
for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
if (code in model_maping_names or code in special_model_types)
}
return ", ".join([f'''[{name}](../model_doc/{code})''' for code, name in model_names.items()] ) + "\n"
def __snake_case ( __UpperCamelCase : str ,__UpperCamelCase : List[str]=False ):
"""simple docstring"""
A_ , A_ , A_ , A_ = _find_text_in_file(
filename=os.path.join(__UpperCamelCase ,__UpperCamelCase ) ,start_prompt="<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->" ,end_prompt="<!--End of the generated tip-->" ,)
A_ = get_model_list_for_task(__UpperCamelCase )
if current_list != new_list:
if overwrite:
with open(os.path.join(__UpperCamelCase ,__UpperCamelCase ) ,"w" ,encoding="utf-8" ,newline="\n" ) as f:
f.writelines(lines[:start_index] + [new_list] + lines[end_index:] )
else:
raise ValueError(
f'''The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`'''
" to fix this." )
if __name__ == "__main__":
__a :int = argparse.ArgumentParser()
parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
__a :Optional[Any] = parser.parse_args()
for task_guide in TASK_GUIDE_TO_MODELS.keys():
check_model_list_for_task(task_guide, args.fix_and_overwrite) | 86 | 1 |
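# ---------------------------------------------------------------------------
# Hedged illustration (not from the script above; hypothetical names). The
# "_find_text_in_file" idiom boils down to scanning for a start prompt and an
# end prompt and returning everything in between:
def find_between(lines, start_prompt, end_prompt):
    start = 0
    while not lines[start].startswith(start_prompt):
        start += 1
    start += 1
    end = start
    while not lines[end].startswith(end_prompt):
        end += 1
    return "".join(lines[start:end]), start, end

demo = ["intro\n", "<!--start-->\n", "generated\n", "<!--end-->\n", "outro\n"]
body, s, e = find_between(demo, "<!--start-->", "<!--end-->")
assert body == "generated\n" and (s, e) == (2, 3)
# ---------------------------------------------------------------------------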
import gc
import unittest
from parameterized import parameterized
from diffusers import FlaxUNetaDConditionModel
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
@slow
@require_flax
class _a ( unittest.TestCase ):
"""simple docstring"""
def __A ( self : Union[str, Any] , UpperCAmelCase : int , UpperCAmelCase : List[str] ):
return f'''gaussian_noise_s={seed}_shape={"_".join([str(UpperCAmelCase ) for s in shape] )}.npy'''
def __A ( self : Union[str, Any] ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def __A ( self : Optional[Any] , UpperCAmelCase : List[Any]=0 , UpperCAmelCase : List[Any]=(4, 4, 64, 64) , UpperCAmelCase : Optional[Any]=False ):
A_ = jnp.bfloataa if fpaa else jnp.floataa
A_ = jnp.array(load_hf_numpy(self.get_file_format(UpperCAmelCase , UpperCAmelCase ) ) , dtype=UpperCAmelCase )
return image
def __A ( self : Any , UpperCAmelCase : List[str]=False , UpperCAmelCase : Union[str, Any]="CompVis/stable-diffusion-v1-4" ):
A_ = jnp.bfloataa if fpaa else jnp.floataa
A_ = "bf16" if fpaa else None
A_ , A_ = FlaxUNetaDConditionModel.from_pretrained(
UpperCAmelCase , subfolder="unet" , dtype=UpperCAmelCase , revision=UpperCAmelCase )
return model, params
def __A ( self : List[str] , UpperCAmelCase : Optional[int]=0 , UpperCAmelCase : List[str]=(4, 77, 768) , UpperCAmelCase : Tuple=False ):
A_ = jnp.bfloataa if fpaa else jnp.floataa
A_ = jnp.array(load_hf_numpy(self.get_file_format(UpperCAmelCase , UpperCAmelCase ) ) , dtype=UpperCAmelCase )
return hidden_states
@parameterized.expand(
[
# fmt: off
[83, 4, [-0.2_323, -0.1_304, 0.0_813, -0.3_093, -0.0_919, -0.1_571, -0.1_125, -0.5_806]],
[17, 0.55, [-0.0_831, -0.2_443, 0.0_901, -0.0_919, 0.3_396, 0.0_103, -0.3_743, 0.0_701]],
[8, 0.89, [-0.4_863, 0.0_859, 0.0_875, -0.1_658, 0.9_199, -0.0_114, 0.4_839, 0.4_639]],
[3, 1000, [-0.5_649, 0.2_402, -0.5_518, 0.1_248, 1.1_328, -0.2_443, -0.0_325, -1.0_078]],
# fmt: on
] )
def __A ( self : Tuple , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Any ):
A_ , A_ = self.get_unet_model(model_id="CompVis/stable-diffusion-v1-4" , fpaa=UpperCAmelCase )
A_ = self.get_latents(UpperCAmelCase , fpaa=UpperCAmelCase )
A_ = self.get_encoder_hidden_states(UpperCAmelCase , fpaa=UpperCAmelCase )
A_ = model.apply(
{"params": params} , UpperCAmelCase , jnp.array(UpperCAmelCase , dtype=jnp.intaa ) , encoder_hidden_states=UpperCAmelCase , ).sample
assert sample.shape == latents.shape
A_ = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa )
A_ = jnp.array(UpperCAmelCase , dtype=jnp.floataa )
        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
assert jnp.allclose(UpperCAmelCase , UpperCAmelCase , atol=1E-2 )
@parameterized.expand(
[
# fmt: off
[83, 4, [0.1_514, 0.0_807, 0.1_624, 0.1_016, -0.1_896, 0.0_263, 0.0_677, 0.2_310]],
[17, 0.55, [0.1_164, -0.0_216, 0.0_170, 0.1_589, -0.3_120, 0.1_005, -0.0_581, -0.1_458]],
[8, 0.89, [-0.1_758, -0.0_169, 0.1_004, -0.1_411, 0.1_312, 0.1_103, -0.1_996, 0.2_139]],
[3, 1000, [0.1_214, 0.0_352, -0.0_731, -0.1_562, -0.0_994, -0.0_906, -0.2_340, -0.0_539]],
# fmt: on
] )
def __A ( self : Any , UpperCAmelCase : Tuple , UpperCAmelCase : Any , UpperCAmelCase : Any ):
A_ , A_ = self.get_unet_model(model_id="stabilityai/stable-diffusion-2" , fpaa=UpperCAmelCase )
A_ = self.get_latents(UpperCAmelCase , shape=(4, 4, 96, 96) , fpaa=UpperCAmelCase )
A_ = self.get_encoder_hidden_states(UpperCAmelCase , shape=(4, 77, 1024) , fpaa=UpperCAmelCase )
A_ = model.apply(
{"params": params} , UpperCAmelCase , jnp.array(UpperCAmelCase , dtype=jnp.intaa ) , encoder_hidden_states=UpperCAmelCase , ).sample
assert sample.shape == latents.shape
A_ = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa )
A_ = jnp.array(UpperCAmelCase , dtype=jnp.floataa )
# Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
assert jnp.allclose(UpperCAmelCase , UpperCAmelCase , atol=1E-2 ) | 86 |
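# ---------------------------------------------------------------------------
# Hedged illustration (not from the tests above; assumes jax is installed and
# uses synthetic values). The tests compare bfloat16 model outputs against
# float32 reference slices with a loose absolute tolerance; the precision
# loss from a bfloat16 round-trip fits comfortably inside atol=1e-2:
import jax.numpy as jnp

reference = jnp.array([0.1514, 0.0807, 0.1624], dtype=jnp.float32)
computed = reference.astype(jnp.bfloat16).astype(jnp.float32)  # simulate bf16 loss
assert jnp.allclose(computed, reference, atol=1e-2)
# ---------------------------------------------------------------------------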
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
__a :Dict = logging.get_logger(__name__)
def __snake_case ( __UpperCamelCase : Dict ,__UpperCamelCase : Tuple=False ):
"""simple docstring"""
A_ = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'''blocks.{i}.norm1.weight''', f'''vit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((f'''blocks.{i}.norm1.bias''', f'''vit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append((f'''blocks.{i}.attn.proj.weight''', f'''vit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((f'''blocks.{i}.attn.proj.bias''', f'''vit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((f'''blocks.{i}.norm2.weight''', f'''vit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((f'''blocks.{i}.norm2.bias''', f'''vit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((f'''blocks.{i}.mlp.fc1.weight''', f'''vit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((f'''blocks.{i}.mlp.fc1.bias''', f'''vit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((f'''blocks.{i}.mlp.fc2.weight''', f'''vit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((f'''blocks.{i}.mlp.fc2.bias''', f'''vit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
("cls_token", "vit.embeddings.cls_token"),
("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
("pos_embed", "vit.embeddings.position_embeddings"),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
("pre_logits.fc.weight", "pooler.dense.weight"),
("pre_logits.fc.bias", "pooler.dense.bias"),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
A_ = [(pair[0], pair[1][4:]) if pair[1].startswith("vit" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("norm.weight", "vit.layernorm.weight"),
("norm.bias", "vit.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
return rename_keys
def __snake_case ( __UpperCamelCase : Optional[int] ,__UpperCamelCase : Tuple ,__UpperCamelCase : Any=False ):
"""simple docstring"""
for i in range(config.num_hidden_layers ):
if base_model:
A_ = ""
else:
A_ = "vit."
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
A_ = state_dict.pop(f'''blocks.{i}.attn.qkv.weight''' )
A_ = state_dict.pop(f'''blocks.{i}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
A_ = in_proj_weight[
: config.hidden_size, :
]
A_ = in_proj_bias[: config.hidden_size]
A_ = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
A_ = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
A_ = in_proj_weight[
-config.hidden_size :, :
]
A_ = in_proj_bias[-config.hidden_size :]
def __snake_case ( __UpperCamelCase : List[Any] ):
"""simple docstring"""
A_ = ["head.weight", "head.bias"]
for k in ignore_keys:
state_dict.pop(__UpperCamelCase ,__UpperCamelCase )
def __snake_case ( __UpperCamelCase : Any ,__UpperCamelCase : Optional[int] ,__UpperCamelCase : List[str] ):
"""simple docstring"""
A_ = dct.pop(__UpperCamelCase )
A_ = val
def __snake_case ( ):
"""simple docstring"""
A_ = "http://images.cocodataset.org/val2017/000000039769.jpg"
A_ = Image.open(requests.get(__UpperCamelCase ,stream=__UpperCamelCase ).raw )
return im
@torch.no_grad()
def __snake_case ( __UpperCamelCase : Union[str, Any] ,__UpperCamelCase : Optional[int] ):
"""simple docstring"""
A_ = ViTConfig()
A_ = False
# dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
if vit_name[-5:] == "in21k":
A_ = True
A_ = int(vit_name[-12:-10] )
A_ = int(vit_name[-9:-6] )
else:
A_ = 1000
A_ = "huggingface/label-files"
A_ = "imagenet-1k-id2label.json"
A_ = json.load(open(hf_hub_download(__UpperCamelCase ,__UpperCamelCase ,repo_type="dataset" ) ,"r" ) )
A_ = {int(__UpperCamelCase ): v for k, v in idalabel.items()}
A_ = idalabel
A_ = {v: k for k, v in idalabel.items()}
A_ = int(vit_name[-6:-4] )
A_ = int(vit_name[-3:] )
# size of the architecture
if "deit" in vit_name:
if vit_name[9:].startswith("tiny" ):
A_ = 192
A_ = 768
A_ = 12
A_ = 3
elif vit_name[9:].startswith("small" ):
A_ = 384
A_ = 1536
A_ = 12
A_ = 6
else:
pass
else:
if vit_name[4:].startswith("small" ):
A_ = 768
A_ = 2304
A_ = 8
A_ = 8
elif vit_name[4:].startswith("base" ):
pass
elif vit_name[4:].startswith("large" ):
A_ = 1024
A_ = 4096
A_ = 24
A_ = 16
elif vit_name[4:].startswith("huge" ):
A_ = 1280
A_ = 5120
A_ = 32
A_ = 16
# load original model from timm
A_ = timm.create_model(__UpperCamelCase ,pretrained=__UpperCamelCase )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
A_ = timm_model.state_dict()
if base_model:
remove_classification_head_(__UpperCamelCase )
A_ = create_rename_keys(__UpperCamelCase ,__UpperCamelCase )
for src, dest in rename_keys:
rename_key(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase )
read_in_q_k_v(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase )
# load HuggingFace model
if vit_name[-5:] == "in21k":
A_ = ViTModel(__UpperCamelCase ).eval()
else:
A_ = ViTForImageClassification(__UpperCamelCase ).eval()
model.load_state_dict(__UpperCamelCase )
# Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor
if "deit" in vit_name:
A_ = DeiTImageProcessor(size=config.image_size )
else:
A_ = ViTImageProcessor(size=config.image_size )
A_ = image_processor(images=prepare_img() ,return_tensors="pt" )
A_ = encoding["pixel_values"]
A_ = model(__UpperCamelCase )
if base_model:
A_ = timm_model.forward_features(__UpperCamelCase )
assert timm_pooled_output.shape == outputs.pooler_output.shape
assert torch.allclose(__UpperCamelCase ,outputs.pooler_output ,atol=1E-3 )
else:
A_ = timm_model(__UpperCamelCase )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(__UpperCamelCase ,outputs.logits ,atol=1E-3 )
Path(__UpperCamelCase ).mkdir(exist_ok=__UpperCamelCase )
print(f'''Saving model {vit_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(__UpperCamelCase )
print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(__UpperCamelCase )
if __name__ == "__main__":
__a :str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--vit_name',
default='vit_base_patch16_224',
type=str,
help='Name of the ViT timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
__a :Optional[int] = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path) | 86 | 1 |
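# ---------------------------------------------------------------------------
# Hedged illustration (not from the script above; hypothetical keys). The
# rename_key helper used during conversion is an ordered pop-and-reinsert on
# the state dict:
state = {"cls_token": 2, "norm.weight": 3}

def rename_key_demo(dct, old, new):
    dct[new] = dct.pop(old)

rename_key_demo(state, "cls_token", "vit.embeddings.cls_token")
assert "cls_token" not in state and state["vit.embeddings.cls_token"] == 2
# ---------------------------------------------------------------------------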
from ...utils import logging
from ..ta.modeling_tf_ta import TFTaEncoderModel, TFTaForConditionalGeneration, TFTaModel
from .configuration_mta import MTaConfig
__a :Any = logging.get_logger(__name__)
__a :Tuple = 'T5Config'
class _a ( snake_case_ ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = 'mt5'
_lowerCamelCase : Any = MTaConfig
class _a ( snake_case_ ):
"""simple docstring"""
_lowerCamelCase : List[Any] = 'mt5'
_lowerCamelCase : int = MTaConfig
class _a ( snake_case_ ):
"""simple docstring"""
_lowerCamelCase : List[Any] = 'mt5'
_lowerCamelCase : int = MTaConfig | 86 |
def __snake_case ( __UpperCamelCase : int = 50 ):
"""simple docstring"""
A_ = [1] * (length + 1)
for row_length in range(length + 1 ):
for tile_length in range(2 ,5 ):
for tile_start in range(row_length - tile_length + 1 ):
ways_number[row_length] += ways_number[
row_length - tile_start - tile_length
]
return ways_number[length]
if __name__ == "__main__":
print(F"{solution() = }") | 86 | 1 |
__a :Optional[int] = [sum(int(c, 10) ** 2 for c in str(i)) for i in range(10_0000)]
def __snake_case ( __UpperCamelCase : int ):
"""simple docstring"""
A_ = 0
while number:
        # Speed is increased slightly by processing five digits at a time.
sum_of_digits_squared += DIGITS_SQUARED[number % 10_0000]
number //= 10_0000
return sum_of_digits_squared
# Two chains exist: one ends in the cycle containing 89, the other ends at 1
# (a chain with the single element 1). Seeding the cache with 58, a member of
# the 89 cycle, and with 1 up front minimises the number of iterations needed
# to classify every other number, so those two are declared at the start.
# The dictionary was changed to an array to speed up the solution.
__a :list[bool | None] = [None] * 1000_0000
__a :Optional[Any] = True
__a :List[Any] = False
def __snake_case ( __UpperCamelCase : int ):
"""simple docstring"""
if CHAINS[number - 1] is not None:
return CHAINS[number - 1] # type: ignore
A_ = chain(next_number(__UpperCamelCase ) )
A_ = number_chain
while number < 1000_0000:
A_ = number_chain
number *= 10
return number_chain
def __snake_case ( __UpperCamelCase : int = 1000_0000 ):
"""simple docstring"""
for i in range(1 ,__UpperCamelCase ):
if CHAINS[i] is None:
chain(i + 1 )
return CHAINS[:number].count(__UpperCamelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F"{solution() = }") | 86 |
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
__a :List[str] = logging.get_logger(__name__)
@add_end_docstrings(snake_case_ )
class _a ( snake_case_ ):
"""simple docstring"""
def __init__( self : Any , **UpperCAmelCase : List[str] ):
super().__init__(**UpperCAmelCase )
if self.framework == "tf":
raise ValueError(f'''The {self.__class__} is only available in PyTorch.''' )
requires_backends(self , "vision" )
self.check_model_type(UpperCAmelCase )
def __call__( self : Optional[int] , UpperCAmelCase : Union[str, "Image.Image", List[Dict[str, Any]]] , UpperCAmelCase : Union[str, List[str]] = None , **UpperCAmelCase : List[Any] , ):
if "text_queries" in kwargs:
A_ = kwargs.pop("text_queries" )
if isinstance(UpperCAmelCase , (str, Image.Image) ):
A_ = {"image": image, "candidate_labels": candidate_labels}
else:
A_ = image
A_ = super().__call__(UpperCAmelCase , **UpperCAmelCase )
return results
def __A ( self : int , **UpperCAmelCase : Tuple ):
A_ = {}
if "threshold" in kwargs:
A_ = kwargs["threshold"]
if "top_k" in kwargs:
A_ = kwargs["top_k"]
return {}, {}, postprocess_params
def __A ( self : List[str] , UpperCAmelCase : Dict ):
A_ = load_image(inputs["image"] )
A_ = inputs["candidate_labels"]
if isinstance(UpperCAmelCase , UpperCAmelCase ):
A_ = candidate_labels.split("," )
A_ = torch.tensor([[image.height, image.width]] , dtype=torch.intaa )
for i, candidate_label in enumerate(UpperCAmelCase ):
A_ = self.tokenizer(UpperCAmelCase , return_tensors=self.framework )
A_ = self.image_processor(UpperCAmelCase , return_tensors=self.framework )
yield {
"is_last": i == len(UpperCAmelCase ) - 1,
"target_size": target_size,
"candidate_label": candidate_label,
**text_inputs,
**image_features,
}
def __A ( self : str , UpperCAmelCase : int ):
A_ = model_inputs.pop("target_size" )
A_ = model_inputs.pop("candidate_label" )
A_ = model_inputs.pop("is_last" )
A_ = self.model(**UpperCAmelCase )
A_ = {"target_size": target_size, "candidate_label": candidate_label, "is_last": is_last, **outputs}
return model_outputs
def __A ( self : Dict , UpperCAmelCase : Any , UpperCAmelCase : Optional[int]=0.1 , UpperCAmelCase : Optional[int]=None ):
A_ = []
for model_output in model_outputs:
A_ = model_output["candidate_label"]
A_ = BaseModelOutput(UpperCAmelCase )
A_ = self.image_processor.post_process_object_detection(
outputs=UpperCAmelCase , threshold=UpperCAmelCase , target_sizes=model_output["target_size"] )[0]
for index in outputs["scores"].nonzero():
A_ = outputs["scores"][index].item()
A_ = self._get_bounding_box(outputs["boxes"][index][0] )
A_ = {"score": score, "label": label, "box": box}
results.append(UpperCAmelCase )
A_ = sorted(UpperCAmelCase , key=lambda UpperCAmelCase : x["score"] , reverse=UpperCAmelCase )
if top_k:
A_ = results[:top_k]
return results
def __A ( self : List[str] , UpperCAmelCase : "torch.Tensor" ):
if self.framework != "pt":
raise ValueError("The ZeroShotObjectDetectionPipeline is only available in PyTorch." )
A_ , A_ , A_ , A_ = box.int().tolist()
A_ = {
"xmin": xmin,
"ymin": ymin,
"xmax": xmax,
"ymax": ymax,
}
return bbox | 86 | 1 |
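# ---------------------------------------------------------------------------
# Hedged illustration (not from the pipeline above; assumes torch). The final
# _get_bounding_box step turns a float (xmin, ymin, xmax, ymax) tensor into a
# plain dict of ints, truncating toward zero:
import torch

box = torch.tensor([12.7, 30.2, 200.9, 180.1])
xmin, ymin, xmax, ymax = box.int().tolist()
assert {"xmin": xmin, "ymin": ymin, "xmax": xmax, "ymax": ymax} == {
    "xmin": 12, "ymin": 30, "xmax": 200, "ymax": 180
}
# ---------------------------------------------------------------------------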
from typing import List
import numpy as np
def __snake_case ( __UpperCamelCase : dict ):
"""simple docstring"""
A_ = {key: len(__UpperCamelCase ) for key, value in gen_kwargs.items() if isinstance(__UpperCamelCase ,__UpperCamelCase )}
if len(set(lists_lengths.values() ) ) > 1:
raise RuntimeError(
(
"Sharding is ambiguous for this dataset: "
+ "we found several data sources lists of different lengths, and we don't know over which list we should parallelize:\n"
+ "\n".join(f'''\t- key {key} has length {length}''' for key, length in lists_lengths.items() )
+ "\nTo fix this, check the 'gen_kwargs' and make sure to use lists only for data sources, "
+ "and use tuples otherwise. In the end there should only be one single list, or several lists with the same length."
) )
A_ = max(lists_lengths.values() ,default=0 )
return max(1 ,__UpperCamelCase )
def __snake_case ( __UpperCamelCase : int ,__UpperCamelCase : int ):
"""simple docstring"""
A_ = []
for group_idx in range(__UpperCamelCase ):
A_ = num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs))
if num_shards_to_add == 0:
break
A_ = shards_indices_per_group[-1].stop if shards_indices_per_group else 0
A_ = range(__UpperCamelCase ,start + num_shards_to_add )
shards_indices_per_group.append(__UpperCamelCase )
return shards_indices_per_group
def __snake_case ( __UpperCamelCase : dict ,__UpperCamelCase : int ):
"""simple docstring"""
A_ = _number_of_shards_in_gen_kwargs(__UpperCamelCase )
if num_shards == 1:
return [dict(__UpperCamelCase )]
else:
A_ = _distribute_shards(num_shards=__UpperCamelCase ,max_num_jobs=__UpperCamelCase )
return [
{
key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]]
if isinstance(__UpperCamelCase ,__UpperCamelCase )
else value
for key, value in gen_kwargs.items()
}
for group_idx in range(len(__UpperCamelCase ) )
]
def __snake_case ( __UpperCamelCase : List[dict] ):
"""simple docstring"""
return {
key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]]
if isinstance(gen_kwargs_list[0][key] ,__UpperCamelCase )
else gen_kwargs_list[0][key]
for key in gen_kwargs_list[0]
}
def __snake_case ( __UpperCamelCase : np.random.Generator ,__UpperCamelCase : dict ):
"""simple docstring"""
A_ = {len(__UpperCamelCase ) for value in gen_kwargs.values() if isinstance(__UpperCamelCase ,__UpperCamelCase )}
A_ = {}
for size in list_sizes:
A_ = list(range(__UpperCamelCase ) )
rng.shuffle(indices_per_size[size] )
# Now let's copy the gen_kwargs and shuffle the lists based on their sizes
A_ = dict(__UpperCamelCase )
for key, value in shuffled_kwargs.items():
if isinstance(__UpperCamelCase ,__UpperCamelCase ):
A_ = [value[i] for i in indices_per_size[len(__UpperCamelCase )]]
return shuffled_kwargs | 86 |
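# ---------------------------------------------------------------------------
# Hedged illustration (not from the module above; hypothetical name). The
# shard-distribution logic assigns contiguous shard ranges to jobs, giving
# the first (num_shards % max_num_jobs) jobs one extra shard:
def distribute(num_shards: int, max_num_jobs: int):
    groups = []
    for group_idx in range(max_num_jobs):
        add = num_shards // max_num_jobs + (group_idx < num_shards % max_num_jobs)
        if add == 0:
            break
        start = groups[-1].stop if groups else 0
        groups.append(range(start, start + add))
    return groups

assert [list(g) for g in distribute(5, 2)] == [[0, 1, 2], [3, 4]]
assert [list(g) for g in distribute(2, 4)] == [[0], [1]]  # jobs capped at shard count
# ---------------------------------------------------------------------------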
import argparse
import torch
# Step 1. clone https://github.com/microsoft/unilm
# Step 2. git checkout b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd (commit at https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd)
# Step 3. cd unilm
# Step 4. ln -s $(realpath wavlm/modules.py) ./ # create symlink
# import classes
from unilm.wavlm.WavLM import WavLM as WavLMOrig
from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig
from transformers import WavLMConfig, WavLMModel, logging
logging.set_verbosity_info()
__a :Any = logging.get_logger(__name__)
__a :int = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn.grep_linear': 'encoder.layers.*.attention.gru_rel_pos_linear',
'self_attn.relative_attention_bias': 'encoder.layers.*.attention.rel_attn_embed',
'self_attn.grep_a': 'encoder.layers.*.attention.gru_rel_pos_const',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'ctc_proj',
'mask_emb': 'masked_spec_embed',
}
__a :Tuple = [
'ctc_proj',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def __snake_case ( __UpperCamelCase : List[str] ,__UpperCamelCase : List[Any] ,__UpperCamelCase : Dict ,__UpperCamelCase : Any ,__UpperCamelCase : Optional[int] ):
"""simple docstring"""
for attribute in key.split("." ):
A_ = getattr(__UpperCamelCase ,__UpperCamelCase )
if weight_type is not None:
A_ = getattr(__UpperCamelCase ,__UpperCamelCase ).shape
else:
A_ = hf_pointer.shape
assert hf_shape == value.shape, (
f'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
f''' {value.shape} for {full_name}'''
)
if weight_type == "weight":
A_ = value
elif weight_type == "weight_g":
A_ = value
elif weight_type == "weight_v":
A_ = value
elif weight_type == "bias":
A_ = value
else:
A_ = value
logger.info(f'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''' )
def __snake_case ( __UpperCamelCase : Optional[int] ,__UpperCamelCase : Optional[Any] ):
"""simple docstring"""
A_ = []
A_ = fairseq_model.state_dict()
A_ = hf_model.feature_extractor
for name, value in fairseq_dict.items():
A_ = False
if "conv_layers" in name:
load_conv_layer(
__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,hf_model.config.feat_extract_norm == "group" ,)
A_ = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
A_ = True
if "*" in mapped_key:
A_ = name.split(__UpperCamelCase )[0].split("." )[-2]
A_ = mapped_key.replace("*" ,__UpperCamelCase )
if "weight_g" in name:
A_ = "weight_g"
elif "weight_v" in name:
A_ = "weight_v"
elif "bias" in name and "relative_attention_bias" not in name:
A_ = "bias"
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
A_ = "weight"
else:
A_ = None
set_recursively(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase )
continue
if not is_used:
unused_weights.append(__UpperCamelCase )
logger.warning(f'''Unused weights: {unused_weights}''' )
def __snake_case ( __UpperCamelCase : Optional[int] ,__UpperCamelCase : Dict ,__UpperCamelCase : List[str] ,__UpperCamelCase : Union[str, Any] ,__UpperCamelCase : Optional[int] ):
"""simple docstring"""
A_ = full_name.split("conv_layers." )[-1]
A_ = name.split("." )
A_ = int(items[0] )
A_ = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
)
A_ = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
)
A_ = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
f'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'''
" found."
)
A_ = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'''
)
A_ = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(__UpperCamelCase )
@torch.no_grad()
def __snake_case ( __UpperCamelCase : Dict ,__UpperCamelCase : str ,__UpperCamelCase : int=None ):
"""simple docstring"""
A_ = torch.load(__UpperCamelCase )
A_ = WavLMConfigOrig(checkpoint["cfg"] )
A_ = WavLMOrig(__UpperCamelCase )
model.load_state_dict(checkpoint["model"] )
model.eval()
if config_path is not None:
A_ = WavLMConfig.from_pretrained(__UpperCamelCase )
else:
A_ = WavLMConfig()
A_ = WavLMModel(__UpperCamelCase )
recursively_load_weights(__UpperCamelCase ,__UpperCamelCase )
hf_wavlm.save_pretrained(__UpperCamelCase )
if __name__ == "__main__":
__a :List[Any] = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
__a :Optional[int] = parser.parse_args()
convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path) | 86 | 1 |
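# ---------------------------------------------------------------------------
# Hedged illustration (not from the conversion script above; synthetic key).
# The "*" wildcard in MAPPING is filled with the layer index extracted from
# the fairseq parameter name:
name = "encoder.layers.3.self_attn.k_proj.weight"
mapped_key = "encoder.layers.*.attention.k_proj"
layer_index = name.split("self_attn.k_proj")[0].split(".")[-2]
assert layer_index == "3"
assert mapped_key.replace("*", layer_index) == "encoder.layers.3.attention.k_proj"
# ---------------------------------------------------------------------------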
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__a :Optional[int] = {
'configuration_git': ['GIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GitConfig', 'GitVisionConfig'],
'processing_git': ['GitProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a :List[Any] = [
'GIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'GitForCausalLM',
'GitModel',
'GitPreTrainedModel',
'GitVisionModel',
]
if TYPE_CHECKING:
from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
from .processing_git import GitProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_git import (
GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GitForCausalLM,
GitModel,
GitPreTrainedModel,
GitVisionModel,
)
else:
import sys
__a :int = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 86 |
def __snake_case ( __UpperCamelCase : list ,__UpperCamelCase : int = 0 ):
"""simple docstring"""
A_ = length or len(__UpperCamelCase )
A_ = False
for i in range(length - 1 ):
if list_data[i] > list_data[i + 1]:
A_ , A_ = list_data[i + 1], list_data[i]
A_ = True
return list_data if not swapped else bubble_sort(__UpperCamelCase ,length - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod() | 86 | 1 |
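# ---------------------------------------------------------------------------
# Hedged restatement (hypothetical name, not the function above). Each pass
# bubbles the largest remaining element to the end, then the recursion
# shrinks the effective length by one until a pass makes no swaps:
def bubble_sort_plain(data: list, length: int = 0) -> list:
    length = length or len(data)
    swapped = False
    for i in range(length - 1):
        if data[i] > data[i + 1]:
            data[i], data[i + 1] = data[i + 1], data[i]
            swapped = True
    return data if not swapped else bubble_sort_plain(data, length - 1)

assert bubble_sort_plain([5, 1, 4, 2]) == [1, 2, 4, 5]
# ---------------------------------------------------------------------------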
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
is_vision_available,
)
__a :Union[str, Any] = {'configuration_vit': ['VIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ViTConfig', 'ViTOnnxConfig']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a :Tuple = ['ViTFeatureExtractor']
__a :List[Any] = ['ViTImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a :List[str] = [
'VIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'ViTForImageClassification',
'ViTForMaskedImageModeling',
'ViTModel',
'ViTPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a :int = [
'TFViTForImageClassification',
'TFViTModel',
'TFViTPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a :Optional[Any] = [
'FlaxViTForImageClassification',
'FlaxViTModel',
'FlaxViTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_vit import ViTFeatureExtractor
from .image_processing_vit import ViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit import (
VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTForImageClassification,
ViTForMaskedImageModeling,
ViTModel,
ViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel
else:
import sys
__a :Any = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 86 |
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class _a ( unittest.TestCase ):
"""simple docstring"""
def __A ( self : List[str] ):
A_ = torch.nn.Linear(10 , 10 )
A_ = torch.optim.SGD(model.parameters() , 0.1 )
A_ = Accelerator()
A_ = accelerator.prepare(UpperCAmelCase )
try:
pickle.loads(pickle.dumps(UpperCAmelCase ) )
except Exception as e:
self.fail(f'''Accelerated optimizer pickling failed with {e}''' )
AcceleratorState._reset_state() | 86 | 1 |
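# ---------------------------------------------------------------------------
# Hedged illustration (not from the test above). The pickling round-trip the
# test exercises on the prepared optimizer, shown on a plain object:
import pickle

state = {"lr": 0.1, "momentum": 0.0}
assert pickle.loads(pickle.dumps(state)) == state
# ---------------------------------------------------------------------------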
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
__a :Optional[Any] = 'true'
def __snake_case ( __UpperCamelCase : Any ,__UpperCamelCase : List[Any]=82 ,__UpperCamelCase : Dict=16 ):
"""simple docstring"""
set_seed(42 )
A_ = RegressionModel()
A_ = deepcopy(__UpperCamelCase )
A_ = RegressionDataset(length=__UpperCamelCase )
A_ = DataLoader(__UpperCamelCase ,batch_size=__UpperCamelCase )
model.to(accelerator.device )
A_ , A_ = accelerator.prepare(__UpperCamelCase ,__UpperCamelCase )
return model, ddp_model, dataloader
def __snake_case ( __UpperCamelCase : Accelerator ,__UpperCamelCase : Dict=False ):
"""simple docstring"""
A_ = AutoTokenizer.from_pretrained("hf-internal-testing/mrpc-bert-base-cased" )
A_ = load_dataset("glue" ,"mrpc" ,split="validation" )
def tokenize_function(__UpperCamelCase : Optional[Any] ):
A_ = tokenizer(examples["sentence1"] ,examples["sentence2"] ,truncation=__UpperCamelCase ,max_length=__UpperCamelCase )
return outputs
with accelerator.main_process_first():
A_ = dataset.map(
__UpperCamelCase ,batched=__UpperCamelCase ,remove_columns=["idx", "sentence1", "sentence2"] ,)
A_ = tokenized_datasets.rename_column("label" ,"labels" )
def collate_fn(__UpperCamelCase : Union[str, Any] ):
if use_longest:
return tokenizer.pad(__UpperCamelCase ,padding="longest" ,return_tensors="pt" )
return tokenizer.pad(__UpperCamelCase ,padding="max_length" ,max_length=128 ,return_tensors="pt" )
return DataLoader(__UpperCamelCase ,shuffle=__UpperCamelCase ,collate_fn=__UpperCamelCase ,batch_size=16 )
def __snake_case ( __UpperCamelCase : Optional[Any] ,__UpperCamelCase : str ):
"""simple docstring"""
A_ = Accelerator(dispatch_batches=__UpperCamelCase ,split_batches=__UpperCamelCase )
A_ = get_dataloader(__UpperCamelCase ,not dispatch_batches )
A_ = AutoModelForSequenceClassification.from_pretrained(
"hf-internal-testing/mrpc-bert-base-cased" ,return_dict=__UpperCamelCase )
A_ , A_ = accelerator.prepare(__UpperCamelCase ,__UpperCamelCase )
return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def __snake_case ( __UpperCamelCase : Dict ,__UpperCamelCase : int ,__UpperCamelCase : Optional[Any] ):
"""simple docstring"""
A_ = []
for batch in dataloader:
A_ , A_ = batch.values()
with torch.no_grad():
A_ = model(__UpperCamelCase )
A_ , A_ = accelerator.gather_for_metrics((logit, target) )
logits_and_targets.append((logit, target) )
A_ , A_ = [], []
for logit, targ in logits_and_targets:
logits.append(__UpperCamelCase )
targs.append(__UpperCamelCase )
A_ , A_ = torch.cat(__UpperCamelCase ), torch.cat(__UpperCamelCase )
return logits, targs
def __snake_case ( __UpperCamelCase : Accelerator ,__UpperCamelCase : Dict=82 ,__UpperCamelCase : List[Any]=False ,__UpperCamelCase : Dict=False ,__UpperCamelCase : Optional[int]=16 ):
"""simple docstring"""
A_ , A_ , A_ = get_basic_setup(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase )
A_ , A_ = generate_predictions(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase )
assert (
len(__UpperCamelCase ) == num_samples
), f'''Unexpected number of inputs:\n Expected: {num_samples}\n Actual: {len(__UpperCamelCase )}'''
def __snake_case ( __UpperCamelCase : bool = False ,__UpperCamelCase : bool = False ):
"""simple docstring"""
A_ = evaluate.load("glue" ,"mrpc" )
A_ , A_ = get_mrpc_setup(__UpperCamelCase ,__UpperCamelCase )
# First do baseline
A_ , A_ , A_ = setup["no"]
model.to(__UpperCamelCase )
model.eval()
for batch in dataloader:
batch.to(__UpperCamelCase )
with torch.inference_mode():
A_ = model(**__UpperCamelCase )
A_ = outputs.logits.argmax(dim=-1 )
metric.add_batch(predictions=__UpperCamelCase ,references=batch["labels"] )
A_ = metric.compute()
# Then do distributed
A_ , A_ , A_ = setup["ddp"]
model.eval()
for batch in dataloader:
with torch.inference_mode():
A_ = model(**__UpperCamelCase )
A_ = outputs.logits.argmax(dim=-1 )
A_ = batch["labels"]
A_ , A_ = accelerator.gather_for_metrics((preds, references) )
metric.add_batch(predictions=__UpperCamelCase ,references=__UpperCamelCase )
A_ = metric.compute()
for key in "accuracy f1".split():
assert math.isclose(
baseline[key] ,distributed[key] ), f'''Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n'''
def __snake_case ( ):
"""simple docstring"""
A_ = Accelerator(split_batches=__UpperCamelCase ,dispatch_batches=__UpperCamelCase )
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_warning()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
    # These are a bit slower, so they should only be run on the GPU or TPU
if torch.cuda.is_available() or is_tpu_available():
if accelerator.is_local_main_process:
print("**Testing gather_for_metrics**" )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
if accelerator.is_local_main_process:
print(f'''With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`''' )
test_mrpc(__UpperCamelCase ,__UpperCamelCase )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print("**Test torch metrics**" )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
A_ = Accelerator(split_batches=__UpperCamelCase ,dispatch_batches=__UpperCamelCase )
if accelerator.is_local_main_process:
print(f'''With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99''' )
test_torch_metrics(__UpperCamelCase ,99 )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print("**Test last batch is not dropped when perfectly divisible**" )
A_ = Accelerator()
test_torch_metrics(__UpperCamelCase ,512 )
accelerator.state._reset_state()
def __snake_case ( __UpperCamelCase : List[Any] ):
"""simple docstring"""
main()
if __name__ == "__main__":
main() | 86 |
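# ---------------------------------------------------------------------------
# Hedged illustration (not from the script above; synthetic values). The
# baseline/distributed agreement check relies on math.isclose, whose default
# relative tolerance of 1e-09 requires roughly nine matching significant
# digits:
import math

baseline = {"accuracy": 0.8406862745098039, "f1": 0.8888888888888888}
distributed = {"accuracy": 0.8406862745098039, "f1": 0.8888888888888888}
assert all(math.isclose(baseline[k], distributed[k]) for k in ("accuracy", "f1"))
# ---------------------------------------------------------------------------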
import argparse
import json
import os
import fairseq
import torch
from torch import nn
from transformers import (
SpeechaTextaConfig,
SpeechaTextaForCausalLM,
SpeechaTextaTokenizer,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
__a :List[str] = logging.get_logger(__name__)
__a :Optional[int] = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
}
__a :Any = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def __snake_case ( __UpperCamelCase : Dict ,__UpperCamelCase : Optional[Any] ,__UpperCamelCase : Optional[int] ,__UpperCamelCase : List[str] ,__UpperCamelCase : Union[str, Any] ):
"""simple docstring"""
for attribute in key.split("." ):
A_ = getattr(__UpperCamelCase ,__UpperCamelCase )
if weight_type is not None:
A_ = getattr(__UpperCamelCase ,__UpperCamelCase ).shape
else:
A_ = hf_pointer.shape
assert hf_shape == value.shape, (
f'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
f''' {value.shape} for {full_name}'''
)
if weight_type == "weight":
A_ = value
elif weight_type == "weight_g":
A_ = value
elif weight_type == "weight_v":
A_ = value
elif weight_type == "bias":
A_ = value
else:
A_ = value
logger.info(f'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''' )
def __snake_case ( __UpperCamelCase : Union[str, Any] ,__UpperCamelCase : Dict ):
"""simple docstring"""
A_ = []
A_ = fairseq_model.state_dict()
A_ = hf_model.feature_extractor
    # if the encoder has a different dim than the decoder -> use proj_weight
A_ = None
for name, value in fairseq_dict.items():
A_ = False
if "conv_layers" in name:
load_conv_layer(
__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,hf_model.config.feat_extract_norm == "group" ,)
A_ = True
elif name.split("." )[0] == "proj":
A_ = fairseq_model.proj
A_ = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
A_ = True
if "*" in mapped_key:
A_ = name.split(__UpperCamelCase )[0].split("." )[-2]
A_ = mapped_key.replace("*" ,__UpperCamelCase )
if "weight_g" in name:
A_ = "weight_g"
elif "weight_v" in name:
A_ = "weight_v"
elif "bias" in name:
A_ = "bias"
elif "weight" in name:
A_ = "weight"
else:
A_ = None
set_recursively(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase )
continue
if not is_used:
unused_weights.append(__UpperCamelCase )
logger.warning(f'''Unused weights: {unused_weights}''' )
return proj_weight
def __snake_case ( __UpperCamelCase : str ,__UpperCamelCase : Optional[Any] ,__UpperCamelCase : int ,__UpperCamelCase : int ,__UpperCamelCase : Any ):
"""simple docstring"""
A_ = full_name.split("conv_layers." )[-1]
A_ = name.split("." )
A_ = int(items[0] )
A_ = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
)
A_ = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
)
A_ = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
f'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'''
" found."
)
A_ = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'''
)
A_ = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(__UpperCamelCase )
def __snake_case ( __UpperCamelCase : Optional[Any] ):
"""simple docstring"""
A_ , A_ = emb.weight.shape
A_ = nn.Linear(__UpperCamelCase ,__UpperCamelCase ,bias=__UpperCamelCase )
A_ = emb.weight.data
return lin_layer
def __snake_case ( __UpperCamelCase : Tuple ):
"""simple docstring"""
with open(__UpperCamelCase ,"r" ,encoding="utf-8" ) as f:
A_ = f.readlines()
A_ = [line.split(" " )[0] for line in lines]
A_ = len(__UpperCamelCase )
A_ = {
"<s>": 0,
"<pad>": 1,
"</s>": 2,
"<unk>": 3,
}
vocab_dict.update(dict(zip(__UpperCamelCase ,range(4 ,num_words + 4 ) ) ) )
return vocab_dict
@torch.no_grad()
def __snake_case ( __UpperCamelCase : List[str] ,__UpperCamelCase : Any ,__UpperCamelCase : List[Any] ,__UpperCamelCase : Union[str, Any] ,__UpperCamelCase : List[str] ,__UpperCamelCase : Optional[Any] ,__UpperCamelCase : Dict ,):
"""simple docstring"""
A_ = WavaVecaConfig.from_pretrained(__UpperCamelCase )
A_ = SpeechaTextaConfig.from_pretrained(
__UpperCamelCase ,vocab_size=__UpperCamelCase ,decoder_layers=__UpperCamelCase ,do_stable_layer_norm=__UpperCamelCase )
A_ = WavaVecaFeatureExtractor(
feature_size=1 ,sampling_rate=1_6000 ,padding_value=0 ,do_normalize=__UpperCamelCase ,return_attention_mask=__UpperCamelCase ,)
A_ , A_ , A_ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] ,arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} )
A_ = model[0].eval()
# set weights for wav2vec2 encoder
A_ = WavaVecaModel(__UpperCamelCase )
A_ = recursively_load_weights_wavaveca(model.encoder ,__UpperCamelCase )
A_ = SpeechaTextaForCausalLM(__UpperCamelCase )
A_ , A_ = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() ,strict=__UpperCamelCase )
# set output linear layer
unexpected_keys.remove("embed_out" )
A_ = nn.Parameter(model.decoder.embed_out.detach() )
    # layer norm is initialized to the identity matrix, so leaving it as-is is fine
logger.warning(f'''The following keys are missing when loading the decoder weights: {missing_keys}''' )
logger.warning(f'''The following keys are unexpected when loading the decoder weights: {unexpected_keys}''' )
A_ = SpeechEncoderDecoderModel(encoder=__UpperCamelCase ,decoder=__UpperCamelCase )
A_ = False
# add projection layer
A_ = nn.Parameter(projection_layer.weight )
A_ = nn.Parameter(projection_layer.bias )
A_ = create_vocab_dict(__UpperCamelCase )
with open(os.path.join(__UpperCamelCase ,"vocab.json" ) ,"w" ) as fp:
json.dump(__UpperCamelCase ,__UpperCamelCase )
A_ = SpeechaTextaTokenizer(os.path.join(__UpperCamelCase ,"vocab.json" ) )
tokenizer.save_pretrained(__UpperCamelCase )
A_ = hf_wavavec.config.to_dict()
A_ = tokenizer.pad_token_id
A_ = tokenizer.bos_token_id
A_ = tokenizer.eos_token_id
A_ = "speech_to_text_2"
A_ = "wav2vec2"
A_ = SpeechEncoderDecoderConfig.from_dict(__UpperCamelCase )
hf_wavavec.save_pretrained(__UpperCamelCase )
feature_extractor.save_pretrained(__UpperCamelCase )
if __name__ == "__main__":
__a :int = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument(
'--encoder_config_path',
default='facebook/wav2vec2-large-lv60',
type=str,
help='Path to hf encoder wav2vec2 checkpoint config',
)
parser.add_argument(
'--decoder_config_path',
default='facebook/s2t-small-mustc-en-fr-st',
type=str,
help='Path to hf decoder s2t checkpoint config',
)
parser.add_argument('--vocab_size', default=1_0224, type=int, help='Vocab size of decoder')
parser.add_argument('--num_decoder_layers', default=7, type=int, help='Number of decoder layers')
__a :Tuple = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
vocab_size=args.vocab_size,
num_decoder_layers=args.num_decoder_layers,
) | 86 | 1 |
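# ---------------------------------------------------------------------------
# Hedged illustration (not from the conversion script above; assumes torch).
# The embedding-to-linear step used for the decoder output head: an
# nn.Embedding weight of shape (vocab, dim) becomes a bias-free
# nn.Linear(dim, vocab) sharing the same data, so logits = hidden @ W.T:
import torch
from torch import nn

emb = nn.Embedding(10, 4)
vocab_size, emb_size = emb.weight.shape
lin = nn.Linear(emb_size, vocab_size, bias=False)
lin.weight.data = emb.weight.data
hidden = torch.randn(2, 4)
assert torch.allclose(lin(hidden), hidden @ emb.weight.t())
# ---------------------------------------------------------------------------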
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tensorflow_text_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__a :Optional[Any] = {
'configuration_bert': ['BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BertConfig', 'BertOnnxConfig'],
'tokenization_bert': ['BasicTokenizer', 'BertTokenizer', 'WordpieceTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a :int = ['BertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a :List[str] = [
'BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'BertForMaskedLM',
'BertForMultipleChoice',
'BertForNextSentencePrediction',
'BertForPreTraining',
'BertForQuestionAnswering',
'BertForSequenceClassification',
'BertForTokenClassification',
'BertLayer',
'BertLMHeadModel',
'BertModel',
'BertPreTrainedModel',
'load_tf_weights_in_bert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a :Any = [
'TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFBertEmbeddings',
'TFBertForMaskedLM',
'TFBertForMultipleChoice',
'TFBertForNextSentencePrediction',
'TFBertForPreTraining',
'TFBertForQuestionAnswering',
'TFBertForSequenceClassification',
'TFBertForTokenClassification',
'TFBertLMHeadModel',
'TFBertMainLayer',
'TFBertModel',
'TFBertPreTrainedModel',
]
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a :List[str] = ['TFBertTokenizer']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a :Any = [
'FlaxBertForCausalLM',
'FlaxBertForMaskedLM',
'FlaxBertForMultipleChoice',
'FlaxBertForNextSentencePrediction',
'FlaxBertForPreTraining',
'FlaxBertForQuestionAnswering',
'FlaxBertForSequenceClassification',
'FlaxBertForTokenClassification',
'FlaxBertModel',
'FlaxBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_bert import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BertConfig, BertOnnxConfig
from .tokenization_bert import BasicTokenizer, BertTokenizer, WordpieceTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_fast import BertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bert import (
BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
BertForMaskedLM,
BertForMultipleChoice,
BertForNextSentencePrediction,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertForTokenClassification,
BertLayer,
BertLMHeadModel,
BertModel,
BertPreTrainedModel,
load_tf_weights_in_bert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_bert import (
TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBertEmbeddings,
TFBertForMaskedLM,
TFBertForMultipleChoice,
TFBertForNextSentencePrediction,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertForTokenClassification,
TFBertLMHeadModel,
TFBertMainLayer,
TFBertModel,
TFBertPreTrainedModel,
)
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_tf import TFBertTokenizer
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_bert import (
FlaxBertForCausalLM,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
FlaxBertPreTrainedModel,
)
else:
import sys
__a :Any = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 86 |
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
__a :str = logging.get_logger(__name__)
__a :Any = Dict[str, Any]
__a :int = List[Prediction]
@add_end_docstrings(snake_case_ )
class _a ( snake_case_ ):
"""simple docstring"""
def __init__( self : Tuple , *UpperCAmelCase : Optional[int] , **UpperCAmelCase : Optional[Any] ):
super().__init__(*UpperCAmelCase , **UpperCAmelCase )
if self.framework == "tf":
raise ValueError(f'''The {self.__class__} is only available in PyTorch.''' )
requires_backends(self , "vision" )
self.check_model_type(
dict(MODEL_FOR_OBJECT_DETECTION_MAPPING.items() + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items() ) )
def __A ( self : str , **UpperCAmelCase : str ):
A_ = {}
if "threshold" in kwargs:
A_ = kwargs["threshold"]
return {}, {}, postprocess_kwargs
def __call__( self : Union[str, Any] , *UpperCAmelCase : Union[str, Any] , **UpperCAmelCase : Optional[Any] ):
return super().__call__(*UpperCAmelCase , **UpperCAmelCase )
def __A ( self : str , UpperCAmelCase : Any ):
A_ = load_image(UpperCAmelCase )
A_ = torch.IntTensor([[image.height, image.width]] )
A_ = self.image_processor(images=[image] , return_tensors="pt" )
if self.tokenizer is not None:
A_ = self.tokenizer(text=inputs["words"] , boxes=inputs["boxes"] , return_tensors="pt" )
A_ = target_size
return inputs
def __A ( self : Optional[Any] , UpperCAmelCase : Optional[int] ):
A_ = model_inputs.pop("target_size" )
A_ = self.model(**UpperCAmelCase )
A_ = outputs.__class__({"target_size": target_size, **outputs} )
if self.tokenizer is not None:
A_ = model_inputs["bbox"]
return model_outputs
def __A ( self : str , UpperCAmelCase : Optional[int] , UpperCAmelCase : Union[str, Any]=0.9 ):
A_ = model_outputs["target_size"]
if self.tokenizer is not None:
# This is a LayoutLMForTokenClassification variant.
# The OCR got the boxes and the model classified the words.
A_ , A_ = target_size[0].tolist()
def unnormalize(UpperCAmelCase : Any ):
return self._get_bounding_box(
torch.Tensor(
[
(width * bbox[0] / 1000),
(height * bbox[1] / 1000),
(width * bbox[2] / 1000),
(height * bbox[3] / 1000),
] ) )
A_ , A_ = model_outputs["logits"].squeeze(0 ).softmax(dim=-1 ).max(dim=-1 )
A_ = [self.model.config.idalabel[prediction] for prediction in classes.tolist()]
A_ = [unnormalize(UpperCAmelCase ) for bbox in model_outputs["bbox"].squeeze(0 )]
A_ = ["score", "label", "box"]
A_ = [dict(zip(UpperCAmelCase , UpperCAmelCase ) ) for vals in zip(scores.tolist() , UpperCAmelCase , UpperCAmelCase ) if vals[0] > threshold]
else:
# This is a regular ForObjectDetectionModel
A_ = self.image_processor.post_process_object_detection(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
A_ = raw_annotations[0]
A_ = raw_annotation["scores"]
A_ = raw_annotation["labels"]
A_ = raw_annotation["boxes"]
A_ = scores.tolist()
A_ = [self.model.config.idalabel[label.item()] for label in labels]
A_ = [self._get_bounding_box(UpperCAmelCase ) for box in boxes]
# {"scores": [...], ...} --> [{"score":x, ...}, ...]
A_ = ["score", "label", "box"]
A_ = [
dict(zip(UpperCAmelCase , UpperCAmelCase ) )
for vals in zip(raw_annotation["scores"] , raw_annotation["labels"] , raw_annotation["boxes"] )
]
return annotation
def __A ( self : Tuple , UpperCAmelCase : "torch.Tensor" ):
if self.framework != "pt":
raise ValueError("The ObjectDetectionPipeline is only available in PyTorch." )
A_ , A_ , A_ , A_ = box.int().tolist()
A_ = {
"xmin": xmin,
"ymin": ymin,
"xmax": xmax,
"ymax": ymax,
}
return bbox | 86 | 1 |
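A hedged usage sketch for the pipeline defined above: "object-detection" is the registered task name, the image path is a placeholder, and each result mirrors the score/label/box keys assembled in the postprocess step.

from transformers import pipeline

detector = pipeline("object-detection")  # downloads a default detection checkpoint
results = detector("cats.png")  # placeholder path to any local image
for r in results:
    print(r["score"], r["label"], r["box"])  # box is {"xmin", "ymin", "xmax", "ymax"}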
import tempfile
import unittest
import numpy as np
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax
if is_flax_available():
import os
from flax.core.frozen_dict import unfreeze
from flax.traverse_util import flatten_dict
from transformers import FlaxBertModel
__a :Optional[int] = '0.12' # assumed parallelism: 8
@require_flax
@is_staging_test
class _a ( unittest.TestCase ):
"""simple docstring"""
@classmethod
def __A ( cls : str ):
A_ = TOKEN
HfFolder.save_token(UpperCAmelCase )
@classmethod
def __A ( cls : str ):
try:
delete_repo(token=cls._token , repo_id="test-model-flax" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="valid_org/test-model-flax-org" )
except HTTPError:
pass
def __A ( self : Any ):
A_ = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
A_ = FlaxBertModel(UpperCAmelCase )
model.push_to_hub("test-model-flax" , use_auth_token=self._token )
A_ = FlaxBertModel.from_pretrained(f'''{USER}/test-model-flax''' )
A_ = flatten_dict(unfreeze(model.params ) )
A_ = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
A_ = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(UpperCAmelCase , 1E-3 , msg=f'''{key} not identical''' )
# Reset repo
delete_repo(token=self._token , repo_id="test-model-flax" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(UpperCAmelCase , repo_id="test-model-flax" , push_to_hub=UpperCAmelCase , use_auth_token=self._token )
A_ = FlaxBertModel.from_pretrained(f'''{USER}/test-model-flax''' )
A_ = flatten_dict(unfreeze(model.params ) )
A_ = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
A_ = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(UpperCAmelCase , 1E-3 , msg=f'''{key} not identical''' )
def __A ( self : Tuple ):
A_ = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
A_ = FlaxBertModel(UpperCAmelCase )
model.push_to_hub("valid_org/test-model-flax-org" , use_auth_token=self._token )
A_ = FlaxBertModel.from_pretrained("valid_org/test-model-flax-org" )
A_ = flatten_dict(unfreeze(model.params ) )
A_ = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
A_ = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(UpperCAmelCase , 1E-3 , msg=f'''{key} not identical''' )
# Reset repo
delete_repo(token=self._token , repo_id="valid_org/test-model-flax-org" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(
UpperCAmelCase , repo_id="valid_org/test-model-flax-org" , push_to_hub=UpperCAmelCase , use_auth_token=self._token )
A_ = FlaxBertModel.from_pretrained("valid_org/test-model-flax-org" )
A_ = flatten_dict(unfreeze(model.params ) )
A_ = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
A_ = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(UpperCAmelCase , 1E-3 , msg=f'''{key} not identical''' )
def __snake_case ( __UpperCamelCase : str ,__UpperCamelCase : int ):
"""simple docstring"""
A_ = True
A_ = flatten_dict(modela.params )
A_ = flatten_dict(modelb.params )
for key in flat_params_a.keys():
if np.sum(np.abs(flat_params_a[key] - flat_params_b[key] ) ) > 1E-4:
A_ = False
return models_are_equal
@require_flax
class _a ( unittest.TestCase ):
"""simple docstring"""
def __A ( self : Union[str, Any] ):
A_ = BertConfig.from_pretrained("hf-internal-testing/tiny-bert-flax-only" )
A_ = FlaxBertModel(UpperCAmelCase )
A_ = "bert"
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(os.path.join(UpperCAmelCase , UpperCAmelCase ) )
with self.assertRaises(UpperCAmelCase ):
A_ = FlaxBertModel.from_pretrained(UpperCAmelCase )
A_ = FlaxBertModel.from_pretrained(UpperCAmelCase , subfolder=UpperCAmelCase )
self.assertTrue(check_models_equal(UpperCAmelCase , UpperCAmelCase ) )
def __A ( self : Tuple ):
A_ = BertConfig.from_pretrained("hf-internal-testing/tiny-bert-flax-only" )
A_ = FlaxBertModel(UpperCAmelCase )
A_ = "bert"
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(os.path.join(UpperCAmelCase , UpperCAmelCase ) , max_shard_size="10KB" )
with self.assertRaises(UpperCAmelCase ):
A_ = FlaxBertModel.from_pretrained(UpperCAmelCase )
A_ = FlaxBertModel.from_pretrained(UpperCAmelCase , subfolder=UpperCAmelCase )
self.assertTrue(check_models_equal(UpperCAmelCase , UpperCAmelCase ) )
def __A ( self : Optional[Any] ):
A_ = "bert"
A_ = "hf-internal-testing/tiny-random-bert-subfolder"
with self.assertRaises(UpperCAmelCase ):
A_ = FlaxBertModel.from_pretrained(UpperCAmelCase )
A_ = FlaxBertModel.from_pretrained(UpperCAmelCase , subfolder=UpperCAmelCase )
self.assertIsNotNone(UpperCAmelCase )
def __A ( self : Tuple ):
A_ = "bert"
A_ = "hf-internal-testing/tiny-random-bert-sharded-subfolder"
with self.assertRaises(UpperCAmelCase ):
A_ = FlaxBertModel.from_pretrained(UpperCAmelCase )
A_ = FlaxBertModel.from_pretrained(UpperCAmelCase , subfolder=UpperCAmelCase )
self.assertIsNotNone(UpperCAmelCase ) | 86 |
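The parameter comparison these tests repeat can be factored into one helper; a sketch using the same flax utilities the tests import (`params_close` and its tolerance are assumptions, not part of the test suite):

import numpy as np
from flax.core.frozen_dict import unfreeze
from flax.traverse_util import flatten_dict

def params_close(params_a, params_b, tol=1e-3):
    # flatten both nested parameter trees to {path: array} and compare leaf by leaf
    flat_a = flatten_dict(unfreeze(params_a))
    flat_b = flatten_dict(unfreeze(params_b))
    return all(abs(float(np.sum(flat_a[k] - flat_b[k]))) <= tol for k in flat_a)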
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNetaDModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def __snake_case ( __UpperCamelCase : Dict ):
"""simple docstring"""
A_ , A_ = image.size
A_ , A_ = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32
A_ = image.resize((w, h) ,resample=PIL_INTERPOLATION["lanczos"] )
A_ = np.array(__UpperCamelCase ).astype(np.floataa ) / 255.0
A_ = image[None].transpose(0 ,3 ,1 ,2 )
A_ = torch.from_numpy(__UpperCamelCase )
return 2.0 * image - 1.0
class _a ( snake_case_ ):
"""simple docstring"""
def __init__( self : Union[str, Any] , UpperCAmelCase : VQModel , UpperCAmelCase : UNetaDModel , UpperCAmelCase : Union[
DDIMScheduler,
PNDMScheduler,
LMSDiscreteScheduler,
EulerDiscreteScheduler,
EulerAncestralDiscreteScheduler,
DPMSolverMultistepScheduler,
] , ):
super().__init__()
self.register_modules(vqvae=UpperCAmelCase , unet=UpperCAmelCase , scheduler=UpperCAmelCase )
@torch.no_grad()
def __call__( self : int , UpperCAmelCase : Union[torch.Tensor, PIL.Image.Image] = None , UpperCAmelCase : Optional[int] = 1 , UpperCAmelCase : Optional[int] = 100 , UpperCAmelCase : Optional[float] = 0.0 , UpperCAmelCase : Optional[Union[torch.Generator, List[torch.Generator]]] = None , UpperCAmelCase : Optional[str] = "pil" , UpperCAmelCase : bool = True , ):
if isinstance(UpperCAmelCase , PIL.Image.Image ):
A_ = 1
elif isinstance(UpperCAmelCase , torch.Tensor ):
A_ = image.shape[0]
else:
raise ValueError(f'''`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(UpperCAmelCase )}''' )
if isinstance(UpperCAmelCase , PIL.Image.Image ):
A_ = preprocess(UpperCAmelCase )
A_ , A_ = image.shape[-2:]
# in_channels should be 6: 3 for latents, 3 for low resolution image
A_ = (batch_size, self.unet.config.in_channels // 2, height, width)
A_ = next(self.unet.parameters() ).dtype
A_ = randn_tensor(UpperCAmelCase , generator=UpperCAmelCase , device=self.device , dtype=UpperCAmelCase )
A_ = image.to(device=self.device , dtype=UpperCAmelCase )
# set timesteps and move to the correct device
self.scheduler.set_timesteps(UpperCAmelCase , device=self.device )
A_ = self.scheduler.timesteps
# scale the initial noise by the standard deviation required by the scheduler
A_ = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
A_ = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
A_ = {}
if accepts_eta:
A_ = eta
for t in self.progress_bar(UpperCAmelCase ):
# concat latents and low resolution image in the channel dimension.
A_ = torch.cat([latents, image] , dim=1 )
A_ = self.scheduler.scale_model_input(UpperCAmelCase , UpperCAmelCase )
# predict the noise residual
A_ = self.unet(UpperCAmelCase , UpperCAmelCase ).sample
# compute the previous noisy sample x_t -> x_t-1
A_ = self.scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ).prev_sample
# decode the image latents with the VQVAE
A_ = self.vqvae.decode(UpperCAmelCase ).sample
A_ = torch.clamp(UpperCAmelCase , -1.0 , 1.0 )
A_ = image / 2 + 0.5
A_ = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
A_ = self.numpy_to_pil(UpperCAmelCase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=UpperCAmelCase ) | 86 | 1 |
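A quick check of the arithmetic in the preprocess helper above: dimensions are rounded down to a multiple of 32, and pixel values in [0, 255] map to [-1, 1] via 2 * (x / 255) - 1.

import numpy as np

print(513 - 513 % 32)  # 512, the nearest lower multiple of 32
pixels = np.array([0.0, 127.5, 255.0], dtype=np.float32)
print(2.0 * (pixels / 255.0) - 1.0)  # [-1.  0.  1.]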
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()
__a :List[str] = logging.get_logger(__name__)
__a :Dict = {name: getattr(transformers, name + 'Fast') for name in SLOW_TO_FAST_CONVERTERS}
def __snake_case ( __UpperCamelCase : Optional[int] ,__UpperCamelCase : int ,__UpperCamelCase : List[str] ,__UpperCamelCase : Union[str, Any] ):
"""simple docstring"""
if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
raise ValueError(f'''Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys() )}.''' )
if tokenizer_name is None:
A_ = TOKENIZER_CLASSES
else:
A_ = {tokenizer_name: getattr(__UpperCamelCase ,tokenizer_name + "Fast" )}
logger.info(f'''Loading tokenizer classes: {tokenizer_names}''' )
for tokenizer_name in tokenizer_names:
A_ = TOKENIZER_CLASSES[tokenizer_name]
A_ = True
if checkpoint_name is None:
A_ = list(tokenizer_class.max_model_input_sizes.keys() )
else:
A_ = [checkpoint_name]
logger.info(f'''For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}''' )
for checkpoint in checkpoint_names:
logger.info(f'''Loading {tokenizer_class.__class__.__name__} {checkpoint}''' )
# Load tokenizer
A_ = tokenizer_class.from_pretrained(__UpperCamelCase ,force_download=__UpperCamelCase )
# Save fast tokenizer
logger.info(f'''Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}''' )
# For organization names we create sub-directories
if "/" in checkpoint:
A_ , A_ = checkpoint.split("/" )
A_ = os.path.join(__UpperCamelCase ,__UpperCamelCase )
elif add_prefix:
A_ = checkpoint
A_ = dump_path
else:
A_ = None
A_ = dump_path
logger.info(f'''=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}''' )
if checkpoint in list(tokenizer.pretrained_vocab_files_map.values() )[0]:
A_ = list(tokenizer.pretrained_vocab_files_map.values() )[0][checkpoint]
A_ = file_path.split(__UpperCamelCase )[-1][0]
if next_char == "/":
A_ = os.path.join(__UpperCamelCase ,__UpperCamelCase )
A_ = None
logger.info(f'''=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}''' )
A_ = tokenizer.save_pretrained(
__UpperCamelCase ,legacy_format=__UpperCamelCase ,filename_prefix=__UpperCamelCase )
logger.info(f'''=> File names {file_names}''' )
for file_name in file_names:
if not file_name.endswith("tokenizer.json" ):
os.remove(__UpperCamelCase )
logger.info(f'''=> removing {file_name}''' )
if __name__ == "__main__":
__a :Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--dump_path', default=None, type=str, required=True, help='Path to output generated fast tokenizer files.'
)
parser.add_argument(
'--tokenizer_name',
default=None,
type=str,
help=(
F"Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will "
'download and convert all the checkpoints from AWS.'
),
)
parser.add_argument(
'--checkpoint_name',
default=None,
type=str,
help='Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.',
)
parser.add_argument(
'--force_download',
action='store_true',
help='Re-download checkpoints.',
)
__a :List[str] = parser.parse_args()
convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download) | 86 |
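The lookup at the top of the script resolves each slow tokenizer class to its fast counterpart by appending "Fast" to the class name; a small sketch (assumes the tokenizers backend is installed so the fast class exists):

import transformers

fast_cls = getattr(transformers, "BertTokenizer" + "Fast")
print(fast_cls.__name__)  # BertTokenizerFast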
__a :Optional[int] = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(10_0000)]
def __snake_case ( __UpperCamelCase : int ):
"""simple docstring"""
A_ = 0
while number:
# Increased Speed Slightly by checking every 5 digits together.
sum_of_digits_squared += DIGITS_SQUARED[number % 10_0000]
number //= 10_0000
return sum_of_digits_squared
# There are 2 Chains made,
# One ends with 89 with the chain member 58 being the one which when declared first,
# there will be the least number of iterations for all the members to be checked.
# The other one ends with 1 and has only one element 1.
# So 58 and 1 are chosen to be declared at the starting.
# Changed dictionary to an array to quicken the solution
__a :list[bool | None] = [None] * 1000_0000
__a :Optional[Any] = True
__a :List[Any] = False
def __snake_case ( __UpperCamelCase : int ):
"""simple docstring"""
if CHAINS[number - 1] is not None:
return CHAINS[number - 1] # type: ignore
A_ = chain(next_number(__UpperCamelCase ) )
A_ = number_chain
while number < 1000_0000:
A_ = number_chain
number *= 10
return number_chain
def __snake_case ( __UpperCamelCase : int = 1000_0000 ):
"""simple docstring"""
for i in range(1 ,__UpperCamelCase ):
if CHAINS[i] is None:
chain(i + 1 )
return CHAINS[:number].count(__UpperCamelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F"{solution() = }") | 86 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__a :Tuple = {
'configuration_m2m_100': ['M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP', 'M2M100Config', 'M2M100OnnxConfig'],
'tokenization_m2m_100': ['M2M100Tokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a :Optional[int] = [
'M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST',
'M2M100ForConditionalGeneration',
'M2M100Model',
'M2M100PreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mam_aaa import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, MaMaaaConfig, MaMaaaOnnxConfig
from .tokenization_mam_aaa import MaMaaaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mam_aaa import (
M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST,
MaMaaaForConditionalGeneration,
MaMaaaModel,
MaMaaaPreTrainedModel,
)
else:
import sys
__a :Any = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 86 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
__a :List[Any] = {
'configuration_tapas': ['TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TapasConfig'],
'tokenization_tapas': ['TapasTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a :Any = [
'TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST',
'TapasForMaskedLM',
'TapasForQuestionAnswering',
'TapasForSequenceClassification',
'TapasModel',
'TapasPreTrainedModel',
'load_tf_weights_in_tapas',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a :Dict = [
'TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFTapasForMaskedLM',
'TFTapasForQuestionAnswering',
'TFTapasForSequenceClassification',
'TFTapasModel',
'TFTapasPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig
from .tokenization_tapas import TapasTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tapas import (
TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasPreTrainedModel,
load_tf_weights_in_tapas,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_tapas import (
TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTapasForMaskedLM,
TFTapasForQuestionAnswering,
TFTapasForSequenceClassification,
TFTapasModel,
TFTapasPreTrainedModel,
)
else:
import sys
__a :str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 86 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__a :Dict = {'configuration_xglm': ['XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP', 'XGLMConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a :Dict = ['XGLMTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a :str = ['XGLMTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a :Tuple = [
'XGLM_PRETRAINED_MODEL_ARCHIVE_LIST',
'XGLMForCausalLM',
'XGLMModel',
'XGLMPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a :List[Any] = [
'FlaxXGLMForCausalLM',
'FlaxXGLMModel',
'FlaxXGLMPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a :Any = [
'TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFXGLMForCausalLM',
'TFXGLMModel',
'TFXGLMPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
__a :List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure) | 86 |
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
__a :List[Any] = get_logger()
__a :Optional[dict] = None
class _a ( TensorFormatter[Mapping, 'jax.Array', Mapping] ):
"""simple docstring"""
def __init__( self : str , UpperCAmelCase : int=None , UpperCAmelCase : List[str]=None , **UpperCAmelCase : List[Any] ):
super().__init__(features=UpperCAmelCase )
import jax
from jaxlib.xla_client import Device
if isinstance(UpperCAmelCase , UpperCAmelCase ):
raise ValueError(
f'''Expected {device} to be a `str` not {type(UpperCAmelCase )}, as `jaxlib.xla_extension.Device` '''
"is not serializable neither with `pickle` nor with `dill`. Instead you can surround "
"the device with `str()` to get its string identifier that will be internally mapped "
"to the actual `jaxlib.xla_extension.Device`." )
A_ = device if isinstance(UpperCAmelCase , UpperCAmelCase ) else str(jax.devices()[0] )
# using global variable since `jaxlib.xla_extension.Device` is not serializable neither
# with `pickle` nor with `dill`, so we need to use a global variable instead
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
A_ = self._map_devices_to_str()
if self.device not in list(DEVICE_MAPPING.keys() ):
logger.warning(
f'''Device with string identifier {self.device} not listed among the available '''
f'''devices: {list(DEVICE_MAPPING.keys() )}, so falling back to the default '''
f'''device: {str(jax.devices()[0] )}.''' )
A_ = str(jax.devices()[0] )
A_ = jnp_array_kwargs
@staticmethod
def __A ( ):
import jax
return {str(device ): device for device in jax.devices()}
def __A ( self : Optional[int] , UpperCAmelCase : int ):
import jax
import jax.numpy as jnp
if isinstance(UpperCAmelCase , UpperCAmelCase ) and column:
if all(
isinstance(UpperCAmelCase , jax.Array ) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column ):
return jnp.stack(UpperCAmelCase , axis=0 )
return column
def __A ( self : List[str] , UpperCAmelCase : str ):
import jax
import jax.numpy as jnp
if isinstance(UpperCAmelCase , (str, bytes, type(UpperCAmelCase )) ):
return value
elif isinstance(UpperCAmelCase , (np.character, np.ndarray) ) and np.issubdtype(value.dtype , np.character ):
return value.tolist()
A_ = {}
if isinstance(UpperCAmelCase , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.integer ):
# the default int precision depends on the jax config
# see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
if jax.config.jax_enable_xaa:
A_ = {"dtype": jnp.intaa}
else:
A_ = {"dtype": jnp.intaa}
elif isinstance(UpperCAmelCase , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.floating ):
A_ = {"dtype": jnp.floataa}
elif config.PIL_AVAILABLE and "PIL" in sys.modules:
import PIL.Image
if isinstance(UpperCAmelCase , PIL.Image.Image ):
A_ = np.asarray(UpperCAmelCase )
# using global variable since `jaxlib.xla_extension.Device` is not serializable neither
# with `pickle` nor with `dill`, so we need to use a global variable instead
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
A_ = self._map_devices_to_str()
with jax.default_device(DEVICE_MAPPING[self.device] ):
# calling jnp.array on a np.ndarray does copy the data
# see https://github.com/google/jax/issues/4486
return jnp.array(UpperCAmelCase , **{**default_dtype, **self.jnp_array_kwargs} )
def __A ( self : Any , UpperCAmelCase : Dict ):
import jax
# support for torch, tf, jax etc.
if config.TORCH_AVAILABLE and "torch" in sys.modules:
import torch
if isinstance(UpperCAmelCase , torch.Tensor ):
return self._tensorize(data_struct.detach().cpu().numpy()[()] )
if hasattr(UpperCAmelCase , "__array__" ) and not isinstance(UpperCAmelCase , jax.Array ):
A_ = data_struct.__array__()
# support for nested types like struct of list of struct
if isinstance(UpperCAmelCase , np.ndarray ):
if data_struct.dtype == object: # jax arrays cannot be instantied from an array of objects
return self._consolidate([self.recursive_tensorize(UpperCAmelCase ) for substruct in data_struct] )
elif isinstance(UpperCAmelCase , (list, tuple) ):
return self._consolidate([self.recursive_tensorize(UpperCAmelCase ) for substruct in data_struct] )
return self._tensorize(UpperCAmelCase )
def __A ( self : Tuple , UpperCAmelCase : dict ):
return map_nested(self._recursive_tensorize , UpperCAmelCase , map_list=UpperCAmelCase )
def __A ( self : Dict , UpperCAmelCase : pa.Table ):
A_ = self.numpy_arrow_extractor().extract_row(UpperCAmelCase )
A_ = self.python_features_decoder.decode_row(UpperCAmelCase )
return self.recursive_tensorize(UpperCAmelCase )
def __A ( self : Any , UpperCAmelCase : pa.Table ):
A_ = self.numpy_arrow_extractor().extract_column(UpperCAmelCase )
A_ = self.python_features_decoder.decode_column(UpperCAmelCase , pa_table.column_names[0] )
A_ = self.recursive_tensorize(UpperCAmelCase )
A_ = self._consolidate(UpperCAmelCase )
return column
def __A ( self : Dict , UpperCAmelCase : pa.Table ):
A_ = self.numpy_arrow_extractor().extract_batch(UpperCAmelCase )
A_ = self.python_features_decoder.decode_batch(UpperCAmelCase )
A_ = self.recursive_tensorize(UpperCAmelCase )
for column_name in batch:
A_ = self._consolidate(batch[column_name] )
return batch | 86 | 1 |
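A sketch of the integer-precision rule encoded in the dtype branch above: jax defaults to 32-bit values unless 64-bit mode is enabled, so the formatter picks the dtype accordingly.

import jax
import jax.numpy as jnp
import numpy as np

value = np.arange(3, dtype=np.int64)
dtype = jnp.int64 if jax.config.jax_enable_x64 else jnp.int32
print(jnp.array(value, dtype=dtype).dtype)  # int32 under the default config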
from copy import deepcopy
class _a :
"""simple docstring"""
def __init__( self : List[Any] , UpperCAmelCase : list[int] | None = None , UpperCAmelCase : int | None = None ):
if arr is None and size is not None:
A_ = size
A_ = [0] * size
elif arr is not None:
self.init(UpperCAmelCase )
else:
raise ValueError("Either arr or size must be specified" )
def __A ( self : Optional[int] , UpperCAmelCase : list[int] ):
A_ = len(UpperCAmelCase )
A_ = deepcopy(UpperCAmelCase )
for i in range(1 , self.size ):
A_ = self.next_(UpperCAmelCase )
if j < self.size:
self.tree[j] += self.tree[i]
def __A ( self : Optional[Any] ):
A_ = self.tree[:]
for i in range(self.size - 1 , 0 , -1 ):
A_ = self.next_(UpperCAmelCase )
if j < self.size:
arr[j] -= arr[i]
return arr
@staticmethod
def __A ( UpperCAmelCase : int ):
return index + (index & (-index))
@staticmethod
def __A ( UpperCAmelCase : int ):
return index - (index & (-index))
def __A ( self : List[str] , UpperCAmelCase : int , UpperCAmelCase : int ):
if index == 0:
self.tree[0] += value
return
while index < self.size:
self.tree[index] += value
A_ = self.next_(UpperCAmelCase )
def __A ( self : Union[str, Any] , UpperCAmelCase : int , UpperCAmelCase : int ):
self.add(UpperCAmelCase , value - self.get(UpperCAmelCase ) )
def __A ( self : Optional[int] , UpperCAmelCase : int ):
if right == 0:
return 0
A_ = self.tree[0]
right -= 1 # make right inclusive
while right > 0:
result += self.tree[right]
A_ = self.prev(UpperCAmelCase )
return result
def __A ( self : Any , UpperCAmelCase : int , UpperCAmelCase : int ):
return self.prefix(UpperCAmelCase ) - self.prefix(UpperCAmelCase )
def __A ( self : Union[str, Any] , UpperCAmelCase : int ):
return self.query(UpperCAmelCase , index + 1 )
def __A ( self : Dict , UpperCAmelCase : int ):
value -= self.tree[0]
if value < 0:
return -1
A_ = 1 # Largest power of 2 <= size
while j * 2 < self.size:
j *= 2
A_ = 0
while j > 0:
if i + j < self.size and self.tree[i + j] <= value:
value -= self.tree[i + j]
i += j
j //= 2
return i
if __name__ == "__main__":
import doctest
doctest.testmod() | 86 |
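A quick check of the low-bit arithmetic behind the two static helpers above: `index & (-index)` isolates the lowest set bit, which is added on the update path and stripped on the prefix path.

def next_(index: int) -> int:
    return index + (index & (-index))

def prev(index: int) -> int:
    return index - (index & (-index))

print(next_(6), prev(6))  # 8 4  (6 = 0b110, lowest set bit = 2)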
import logging
import os
from typing import List, Tuple
import numpy as np
import psutil
import torch
import torch.distributed as dist
from transformers import RagRetriever
__a :Any = logging.getLogger(__name__)
class _a ( snake_case_ ):
"""simple docstring"""
def __init__( self : Dict , UpperCAmelCase : List[Any] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : str=None ):
super().__init__(
UpperCAmelCase , question_encoder_tokenizer=UpperCAmelCase , generator_tokenizer=UpperCAmelCase , index=UpperCAmelCase , init_retrieval=UpperCAmelCase , )
A_ = None
def __A ( self : Dict , UpperCAmelCase : int ):
logger.info("initializing retrieval" )
# initializing a separate process group for retrieval as the default
# nccl backend doesn't support gather/scatter operations while gloo
# is too slow to replace nccl for the core gpu communication
if dist.is_initialized():
logger.info("dist initialized" )
# needs to be set manually
A_ = self._infer_socket_ifname()
# avoid clash with the NCCL port
A_ = str(distributed_port + 1 )
A_ = dist.new_group(ranks=UpperCAmelCase , backend="gloo" )
# initialize retriever only on the main worker
if not dist.is_initialized() or self._is_main():
logger.info("dist not initialized / main" )
self.index.init_index()
# all processes wait untill the retriever is initialized by the main process
if dist.is_initialized():
torch.distributed.barrier(group=self.process_group )
def __A ( self : List[str] ):
return dist.get_rank(group=self.process_group ) == 0
def __A ( self : Any , UpperCAmelCase : List[Any] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Dict=torch.floataa ):
A_ = torch.empty(UpperCAmelCase , dtype=UpperCAmelCase )
dist.scatter(UpperCAmelCase , src=0 , scatter_list=UpperCAmelCase , group=self.process_group )
return target_tensor
def __A ( self : Any ):
A_ = psutil.net_if_addrs()
# a hacky way to deal with varying network interface names
A_ = next((addr for addr in addrs if addr.startswith("e" )) , UpperCAmelCase )
return ifname
def __A ( self : Tuple , UpperCAmelCase : np.ndarray , UpperCAmelCase : int ):
# single GPU training
if not dist.is_initialized():
A_ , A_ = self._main_retrieve(UpperCAmelCase , UpperCAmelCase )
return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(UpperCAmelCase )
# distributed training
A_ = dist.get_world_size(group=self.process_group )
# gather logic
A_ = None
if self._is_main():
A_ = [torch.empty(question_hidden_states.shape , dtype=torch.floataa ) for _ in range(UpperCAmelCase )]
dist.gather(torch.tensor(UpperCAmelCase ) , dst=0 , gather_list=UpperCAmelCase , group=self.process_group )
# scatter logic
A_ = question_hidden_states.shape[0]
A_ = []
A_ = []
if self._is_main():
assert len(UpperCAmelCase ) == world_size
A_ , A_ = self._main_retrieve(torch.cat(UpperCAmelCase ).numpy() , UpperCAmelCase )
A_ , A_ = torch.tensor(UpperCAmelCase ), torch.tensor(UpperCAmelCase )
A_ = self._chunk_tensor(UpperCAmelCase , UpperCAmelCase )
A_ = self._chunk_tensor(UpperCAmelCase , UpperCAmelCase )
A_ = self._scattered(UpperCAmelCase , [n_queries, n_docs] , target_type=torch.intaa )
A_ = self._scattered(UpperCAmelCase , [n_queries, n_docs, question_hidden_states.shape[1]] )
return retrieved_doc_embeds.numpy(), doc_ids.numpy(), self.index.get_doc_dicts(UpperCAmelCase ) | 86 | 1 |
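A hedged sketch of the gloo scatter that the `_scattered` helper relies on; it assumes a launch via torchrun so the process-group environment variables are already set, and it is a toy illustration rather than the retriever's actual wiring.

import torch
import torch.distributed as dist

dist.init_process_group(backend="gloo")
rank, world = dist.get_rank(), dist.get_world_size()
target = torch.empty(2)
chunks = [torch.full((2,), float(i)) for i in range(world)] if rank == 0 else None
dist.scatter(target, scatter_list=chunks, src=0)  # rank r receives chunks[r]
print(rank, target)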
import unittest
from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
__a :List[str] = 'platform'
import jax
import jax.numpy as jnp
import numpy as np
from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel
@require_flax
class _a :
"""simple docstring"""
_lowerCamelCase : List[str] = PegasusConfig
_lowerCamelCase : Any = {}
_lowerCamelCase : Optional[Any] = 'gelu'
def __init__( self : Any , UpperCAmelCase : Dict , UpperCAmelCase : str=13 , UpperCAmelCase : Optional[int]=7 , UpperCAmelCase : Union[str, Any]=True , UpperCAmelCase : Union[str, Any]=False , UpperCAmelCase : List[str]=99 , UpperCAmelCase : Optional[int]=32 , UpperCAmelCase : Union[str, Any]=5 , UpperCAmelCase : List[str]=4 , UpperCAmelCase : List[Any]=37 , UpperCAmelCase : Optional[int]=0.1 , UpperCAmelCase : Union[str, Any]=0.1 , UpperCAmelCase : Any=20 , UpperCAmelCase : List[Any]=2 , UpperCAmelCase : Any=1 , UpperCAmelCase : Union[str, Any]=0 , ):
A_ = parent
A_ = batch_size
A_ = seq_length
A_ = is_training
A_ = use_labels
A_ = vocab_size
A_ = hidden_size
A_ = num_hidden_layers
A_ = num_attention_heads
A_ = intermediate_size
A_ = hidden_dropout_prob
A_ = attention_probs_dropout_prob
A_ = max_position_embeddings
A_ = eos_token_id
A_ = pad_token_id
A_ = bos_token_id
def __A ( self : Union[str, Any] ):
A_ = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ).clip(3 , self.vocab_size )
A_ = np.expand_dims(np.array([self.eos_token_id] * self.batch_size ) , 1 )
A_ = np.concatenate([input_ids, eos_tensor] , axis=1 )
A_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A_ = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
A_ = prepare_pegasus_inputs_dict(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
return config, inputs_dict
def __A ( self : List[str] , UpperCAmelCase : Any , UpperCAmelCase : List[str] , UpperCAmelCase : Optional[int] ):
A_ = 20
A_ = model_class_name(UpperCAmelCase )
A_ = model.encode(inputs_dict["input_ids"] )
A_ , A_ = (
inputs_dict["decoder_input_ids"],
inputs_dict["decoder_attention_mask"],
)
A_ = model.init_cache(decoder_input_ids.shape[0] , UpperCAmelCase , UpperCAmelCase )
A_ = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype="i4" )
A_ = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
A_ = model.decode(
decoder_input_ids[:, :-1] , UpperCAmelCase , decoder_attention_mask=UpperCAmelCase , past_key_values=UpperCAmelCase , decoder_position_ids=UpperCAmelCase , )
A_ = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="i4" )
A_ = model.decode(
decoder_input_ids[:, -1:] , UpperCAmelCase , decoder_attention_mask=UpperCAmelCase , past_key_values=outputs_cache.past_key_values , decoder_position_ids=UpperCAmelCase , )
A_ = model.decode(UpperCAmelCase , UpperCAmelCase )
A_ = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=f'''Max diff is {diff}''' )
def __A ( self : Any , UpperCAmelCase : Tuple , UpperCAmelCase : Dict , UpperCAmelCase : Optional[Any] ):
A_ = 20
A_ = model_class_name(UpperCAmelCase )
A_ = model.encode(inputs_dict["input_ids"] )
A_ , A_ = (
inputs_dict["decoder_input_ids"],
inputs_dict["decoder_attention_mask"],
)
A_ = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
] , axis=-1 , )
A_ = model.init_cache(decoder_input_ids.shape[0] , UpperCAmelCase , UpperCAmelCase )
A_ = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
A_ = model.decode(
decoder_input_ids[:, :-1] , UpperCAmelCase , decoder_attention_mask=UpperCAmelCase , past_key_values=UpperCAmelCase , decoder_position_ids=UpperCAmelCase , )
A_ = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="i4" )
A_ = model.decode(
decoder_input_ids[:, -1:] , UpperCAmelCase , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=UpperCAmelCase , decoder_position_ids=UpperCAmelCase , )
A_ = model.decode(UpperCAmelCase , UpperCAmelCase , decoder_attention_mask=UpperCAmelCase )
A_ = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=f'''Max diff is {diff}''' )
def __snake_case ( __UpperCamelCase : Tuple ,__UpperCamelCase : Dict ,__UpperCamelCase : List[Any] ,__UpperCamelCase : List[str]=None ,__UpperCamelCase : int=None ,):
"""simple docstring"""
if attention_mask is None:
A_ = np.not_equal(__UpperCamelCase ,config.pad_token_id ).astype(np.inta )
if decoder_attention_mask is None:
A_ = np.concatenate(
[
np.ones(decoder_input_ids[:, :1].shape ,dtype=np.inta ),
np.not_equal(decoder_input_ids[:, 1:] ,config.pad_token_id ).astype(np.inta ),
] ,axis=-1 ,)
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
}
@require_flax
class _a ( snake_case_ , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase : List[Any] = (
(
FlaxPegasusForConditionalGeneration,
FlaxPegasusModel,
)
if is_flax_available()
else ()
)
_lowerCamelCase : Tuple = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
_lowerCamelCase : List[str] = True
_lowerCamelCase : List[str] = False
_lowerCamelCase : Tuple = False
_lowerCamelCase : Union[str, Any] = False
def __A ( self : Optional[int] ):
A_ = FlaxPegasusModelTester(self )
A_ = ConfigTester(self , config_class=UpperCAmelCase )
def __A ( self : Dict ):
self.config_tester.run_common_tests()
def __A ( self : Any ):
A_ , A_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
def __A ( self : List[str] ):
A_ , A_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
def __A ( self : Union[str, Any] ):
A_ , A_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
A_ = self._prepare_for_class(UpperCAmelCase , UpperCAmelCase )
A_ = model_class(UpperCAmelCase )
@jax.jit
def encode_jitted(UpperCAmelCase : Tuple , UpperCAmelCase : Optional[Any]=None , **UpperCAmelCase : str ):
return model.encode(input_ids=UpperCAmelCase , attention_mask=UpperCAmelCase )
with self.subTest("JIT Enabled" ):
A_ = encode_jitted(**UpperCAmelCase ).to_tuple()
with self.subTest("JIT Disabled" ):
with jax.disable_jit():
A_ = encode_jitted(**UpperCAmelCase ).to_tuple()
self.assertEqual(len(UpperCAmelCase ) , len(UpperCAmelCase ) )
for jitted_output, output in zip(UpperCAmelCase , UpperCAmelCase ):
self.assertEqual(jitted_output.shape , output.shape )
def __A ( self : Optional[Any] ):
A_ , A_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
A_ = model_class(UpperCAmelCase )
A_ = model.encode(inputs_dict["input_ids"] , inputs_dict["attention_mask"] )
A_ = {
"decoder_input_ids": inputs_dict["decoder_input_ids"],
"decoder_attention_mask": inputs_dict["decoder_attention_mask"],
"encoder_outputs": encoder_outputs,
}
@jax.jit
def decode_jitted(UpperCAmelCase : Any , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Optional[int] ):
return model.decode(
decoder_input_ids=UpperCAmelCase , decoder_attention_mask=UpperCAmelCase , encoder_outputs=UpperCAmelCase , )
with self.subTest("JIT Enabled" ):
A_ = decode_jitted(**UpperCAmelCase ).to_tuple()
with self.subTest("JIT Disabled" ):
with jax.disable_jit():
A_ = decode_jitted(**UpperCAmelCase ).to_tuple()
self.assertEqual(len(UpperCAmelCase ) , len(UpperCAmelCase ) )
for jitted_output, output in zip(UpperCAmelCase , UpperCAmelCase ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def __A ( self : int ):
for model_class_name in self.all_model_classes:
A_ = model_class_name.from_pretrained("google/pegasus-large" , from_pt=UpperCAmelCase )
A_ = np.ones((1, 1) )
A_ = model(UpperCAmelCase )
self.assertIsNotNone(UpperCAmelCase )
@slow
def __A ( self : Optional[int] ):
A_ = FlaxPegasusForConditionalGeneration.from_pretrained("google/pegasus-xsum" )
A_ = PegasusTokenizer.from_pretrained("google/pegasus-xsum" )
A_ = [
" PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.",
" The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" ",
]
A_ = [
"California's largest electricity provider has turned off power to hundreds of thousands of customers.",
"Pop group N-Dubz have revealed they were surprised to get four nominations for this year's Mobo Awards.",
]
A_ = tokenizer(UpperCAmelCase , return_tensors="np" , truncation=UpperCAmelCase , max_length=512 , padding=UpperCAmelCase )
A_ = model.generate(**UpperCAmelCase , num_beams=2 ).sequences
A_ = tokenizer.batch_decode(UpperCAmelCase , skip_special_tokens=UpperCAmelCase )
assert tgt_text == decoded | 86 |
from jiwer import compute_measures
import datasets
__a :List[Any] = '\\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n'
__a :Union[str, Any] = '\\nWord error rate (WER) is a common metric of the performance of an automatic speech recognition system.\n\nThe general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.\n\nThis problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.\n\nWord error rate can then be computed as:\n\nWER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct words,\nN is the number of words in the reference (N=S+D+C).\n\nThis value indicates the average number of errors per reference word. The lower the value, the better the\nperformance of the ASR system with a WER of 0 being a perfect score.\n'
__a :str = '\nCompute WER score of transcribed segments against references.\n\nArgs:\n references: List of references for each speech input.\n predictions: List of transcriptions to score.\n concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.\n\nReturns:\n (float): the word error rate\n\nExamples:\n\n >>> predictions = ["this is the prediction", "there is an other sample"]\n >>> references = ["this is the reference", "there is another one"]\n >>> wer = datasets.load_metric("wer")\n >>> wer_score = wer.compute(predictions=predictions, references=references)\n >>> print(wer_score)\n 0.5\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _a ( datasets.Metric ):
"""simple docstring"""
def __A ( self : Any ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Value("string" , id="sequence" ),
} ) , codebase_urls=["https://github.com/jitsi/jiwer/"] , reference_urls=[
"https://en.wikipedia.org/wiki/Word_error_rate",
] , )
def __A ( self : Dict , UpperCAmelCase : Dict=None , UpperCAmelCase : List[Any]=None , UpperCAmelCase : str=False ):
if concatenate_texts:
return compute_measures(UpperCAmelCase , UpperCAmelCase )["wer"]
else:
A_ = 0
A_ = 0
for prediction, reference in zip(UpperCAmelCase , UpperCAmelCase ):
A_ = compute_measures(UpperCAmelCase , UpperCAmelCase )
incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
total += measures["substitutions"] + measures["deletions"] + measures["hits"]
return incorrect / total | 86 | 1 |
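A worked instance of the formula WER = (S + D + I) / (S + D + C) quoted in the docstring, using its own example pair and one minimal alignment:

# "this is the prediction"   vs "this is the reference"  -> S=1, D=0, I=0, C=3
# "there is an other sample" vs "there is another one"   -> S=2, D=0, I=1, C=2
# incorrect = (1 + 0 + 0) + (2 + 0 + 1) = 4
# total     = (1 + 0 + 3) + (2 + 0 + 2) = 8
print(4 / 8)  # 0.5, the score the docstring example reports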
def __snake_case ( __UpperCamelCase : str ,__UpperCamelCase : int ):
"""simple docstring"""
return [sentence[i : i + ngram_size] for i in range(len(__UpperCamelCase ) - ngram_size + 1 )]
if __name__ == "__main__":
from doctest import testmod
testmod() | 86 |
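The helper above produces character-level n-grams by slicing; a short example with a readable name (`create_ngram` is illustrative, standing in for the dump's `__snake_case` placeholder):

def create_ngram(sentence: str, ngram_size: int) -> list:
    return [sentence[i : i + ngram_size] for i in range(len(sentence) - ngram_size + 1)]

print(create_ngram("abcde", 3))  # ['abc', 'bcd', 'cde']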
class _a :
"""simple docstring"""
def __init__( self : Union[str, Any] , UpperCAmelCase : int , UpperCAmelCase : Any , UpperCAmelCase : Dict ):
A_ = None
A_ = None
A_ = graph
self._normalize_graph(UpperCAmelCase , UpperCAmelCase )
A_ = len(UpperCAmelCase )
A_ = None
def __A ( self : Union[str, Any] , UpperCAmelCase : List[str] , UpperCAmelCase : Tuple ):
if sources is int:
A_ = [sources]
if sinks is int:
A_ = [sinks]
if len(UpperCAmelCase ) == 0 or len(UpperCAmelCase ) == 0:
return
A_ = sources[0]
A_ = sinks[0]
# make fake vertex if there are more
# than one source or sink
if len(UpperCAmelCase ) > 1 or len(UpperCAmelCase ) > 1:
A_ = 0
for i in sources:
max_input_flow += sum(self.graph[i] )
A_ = len(self.graph ) + 1
for room in self.graph:
room.insert(0 , 0 )
self.graph.insert(0 , [0] * size )
for i in sources:
A_ = max_input_flow
A_ = 0
A_ = len(self.graph ) + 1
for room in self.graph:
room.append(0 )
self.graph.append([0] * size )
for i in sinks:
A_ = max_input_flow
A_ = size - 1
def __A ( self : str ):
if self.maximum_flow_algorithm is None:
raise Exception("You need to set maximum flow algorithm before." )
if self.source_index is None or self.sink_index is None:
return 0
self.maximum_flow_algorithm.execute()
return self.maximum_flow_algorithm.getMaximumFlow()
def __A ( self : Tuple , UpperCAmelCase : List[Any] ):
A_ = algorithm(self )
class _a :
"""simple docstring"""
def __init__( self : Tuple , UpperCAmelCase : List[str] ):
A_ = flow_network
A_ = flow_network.verticesCount
A_ = flow_network.sourceIndex
A_ = flow_network.sinkIndex
# it's just a reference, so you shouldn't change
# it in your algorithms, use deep copy before doing that
A_ = flow_network.graph
A_ = False
def __A ( self : Optional[int] ):
if not self.executed:
self._algorithm()
A_ = True
def __A ( self : Dict ):
pass
class _a ( snake_case_ ):
"""simple docstring"""
def __init__( self : Optional[Any] , UpperCAmelCase : List[Any] ):
super().__init__(UpperCAmelCase )
# use this to save your result
A_ = -1
def __A ( self : Tuple ):
if not self.executed:
raise Exception("You should execute algorithm before using its result!" )
return self.maximum_flow
class _a ( snake_case_ ):
"""simple docstring"""
def __init__( self : Tuple , UpperCAmelCase : Union[str, Any] ):
super().__init__(UpperCAmelCase )
A_ = [[0] * self.verticies_count for i in range(self.verticies_count )]
A_ = [0] * self.verticies_count
A_ = [0] * self.verticies_count
def __A ( self : List[str] ):
A_ = self.verticies_count
# push some substance to graph
for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index] ):
self.preflow[self.source_index][nextvertex_index] += bandwidth
self.preflow[nextvertex_index][self.source_index] -= bandwidth
self.excesses[nextvertex_index] += bandwidth
# Relabel-to-front selection rule
A_ = [
i
for i in range(self.verticies_count )
if i != self.source_index and i != self.sink_index
]
# move through list
A_ = 0
while i < len(UpperCAmelCase ):
A_ = vertices_list[i]
A_ = self.heights[vertex_index]
self.process_vertex(UpperCAmelCase )
if self.heights[vertex_index] > previous_height:
# if it was relabeled, swap elements
# and start from 0 index
vertices_list.insert(0 , vertices_list.pop(UpperCAmelCase ) )
A_ = 0
else:
i += 1
A_ = sum(self.preflow[self.source_index] )
def __A ( self : List[str] , UpperCAmelCase : Dict ):
while self.excesses[vertex_index] > 0:
for neighbour_index in range(self.verticies_count ):
# if it's neighbour and current vertex is higher
if (
self.graph[vertex_index][neighbour_index]
- self.preflow[vertex_index][neighbour_index]
> 0
and self.heights[vertex_index] > self.heights[neighbour_index]
):
self.push(UpperCAmelCase , UpperCAmelCase )
self.relabel(UpperCAmelCase )
def __A ( self : Optional[Any] , UpperCAmelCase : Dict , UpperCAmelCase : str ):
A_ = min(
self.excesses[from_index] , self.graph[from_index][to_index] - self.preflow[from_index][to_index] , )
self.preflow[from_index][to_index] += preflow_delta
self.preflow[to_index][from_index] -= preflow_delta
self.excesses[from_index] -= preflow_delta
self.excesses[to_index] += preflow_delta
def __A ( self : Optional[Any] , UpperCAmelCase : List[Any] ):
A_ = None
for to_index in range(self.verticies_count ):
if (
self.graph[vertex_index][to_index]
- self.preflow[vertex_index][to_index]
> 0
) and (min_height is None or self.heights[to_index] < min_height):
A_ = self.heights[to_index]
if min_height is not None:
A_ = min_height + 1
if __name__ == "__main__":
__a :Tuple = [0]
__a :Tuple = [3]
# graph = [
# [0, 0, 4, 6, 0, 0],
# [0, 0, 5, 2, 0, 0],
# [0, 0, 0, 0, 4, 4],
# [0, 0, 0, 0, 6, 6],
# [0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0],
# ]
__a :List[str] = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]
# prepare our network
__a :List[str] = FlowNetwork(graph, entrances, exits)
# set algorithm
flow_network.set_maximum_flow_algorithm(PushRelabelExecutor)
# and calculate
__a :List[Any] = flow_network.find_maximum_flow()
print(F"maximum flow is {maximum_flow}") | 86 | 1 |
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class _a ( snake_case_ ):
"""simple docstring"""
_lowerCamelCase : Tuple = (IPNDMScheduler,)
_lowerCamelCase : List[str] = (('num_inference_steps', 5_0),)
def __A ( self : Optional[Any] , **UpperCAmelCase : str ):
A_ = {"num_train_timesteps": 1000}
config.update(**UpperCAmelCase )
return config
def __A ( self : List[str] , UpperCAmelCase : Dict=0 , **UpperCAmelCase : List[Any] ):
A_ = dict(self.forward_default_kwargs )
A_ = kwargs.pop("num_inference_steps" , UpperCAmelCase )
A_ = self.dummy_sample
A_ = 0.1 * sample
A_ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
A_ = self.get_scheduler_config(**UpperCAmelCase )
A_ = scheduler_class(**UpperCAmelCase )
scheduler.set_timesteps(UpperCAmelCase )
# copy over dummy past residuals
A_ = dummy_past_residuals[:]
if time_step is None:
A_ = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(UpperCAmelCase )
A_ = scheduler_class.from_pretrained(UpperCAmelCase )
new_scheduler.set_timesteps(UpperCAmelCase )
# copy over dummy past residuals
A_ = dummy_past_residuals[:]
A_ = scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ).prev_sample
A_ = new_scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
A_ = scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ).prev_sample
A_ = new_scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def __A ( self : Tuple ):
pass
def __A ( self : Optional[int] , UpperCAmelCase : List[Any]=0 , **UpperCAmelCase : str ):
A_ = dict(self.forward_default_kwargs )
A_ = kwargs.pop("num_inference_steps" , UpperCAmelCase )
A_ = self.dummy_sample
A_ = 0.1 * sample
A_ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
A_ = self.get_scheduler_config()
A_ = scheduler_class(**UpperCAmelCase )
scheduler.set_timesteps(UpperCAmelCase )
# copy over dummy past residuals (must be after setting timesteps)
A_ = dummy_past_residuals[:]
if time_step is None:
A_ = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(UpperCAmelCase )
A_ = scheduler_class.from_pretrained(UpperCAmelCase )
# copy over dummy past residuals
new_scheduler.set_timesteps(UpperCAmelCase )
# copy over dummy past residual (must be after setting timesteps)
A_ = dummy_past_residuals[:]
A_ = scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ).prev_sample
A_ = new_scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
A_ = scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ).prev_sample
A_ = new_scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def __A ( self : Optional[int] , **UpperCAmelCase : List[str] ):
A_ = self.scheduler_classes[0]
A_ = self.get_scheduler_config(**UpperCAmelCase )
A_ = scheduler_class(**UpperCAmelCase )
A_ = 10
A_ = self.dummy_model()
A_ = self.dummy_sample_deter
scheduler.set_timesteps(UpperCAmelCase )
for i, t in enumerate(scheduler.timesteps ):
A_ = model(UpperCAmelCase , UpperCAmelCase )
A_ = scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ).prev_sample
for i, t in enumerate(scheduler.timesteps ):
A_ = model(UpperCAmelCase , UpperCAmelCase )
A_ = scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ).prev_sample
return sample
def __A ( self : int ):
A_ = dict(self.forward_default_kwargs )
A_ = kwargs.pop("num_inference_steps" , UpperCAmelCase )
for scheduler_class in self.scheduler_classes:
A_ = self.get_scheduler_config()
A_ = scheduler_class(**UpperCAmelCase )
A_ = self.dummy_sample
A_ = 0.1 * sample
if num_inference_steps is not None and hasattr(UpperCAmelCase , "set_timesteps" ):
scheduler.set_timesteps(UpperCAmelCase )
elif num_inference_steps is not None and not hasattr(UpperCAmelCase , "set_timesteps" ):
A_ = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
A_ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
A_ = dummy_past_residuals[:]
A_ = scheduler.timesteps[5]
A_ = scheduler.timesteps[6]
A_ = scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ).prev_sample
A_ = scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
A_ = scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ).prev_sample
A_ = scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def __A ( self : Any ):
for timesteps in [100, 1000]:
self.check_over_configs(num_train_timesteps=UpperCAmelCase , time_step=UpperCAmelCase )
def __A ( self : Tuple ):
for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 100] ):
self.check_over_forward(num_inference_steps=UpperCAmelCase , time_step=UpperCAmelCase )
def __A ( self : Optional[Any] ):
A_ = self.full_loop()
A_ = torch.mean(torch.abs(UpperCAmelCase ) )
assert abs(result_mean.item() - 2_540_529) < 10 | 86 |
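Both check_over_* helpers above test one invariant: a scheduler reloaded from its saved config must step identically to the original. A minimal sketch of that round trip, assuming only the public diffusers API (IPNDMScheduler, ConfigMixin.save_config, SchedulerMixin.from_pretrained):

import tempfile
import torch
from diffusers import IPNDMScheduler

scheduler = IPNDMScheduler(num_train_timesteps=1000)
scheduler.set_timesteps(50)
with tempfile.TemporaryDirectory() as tmpdirname:
    scheduler.save_config(tmpdirname)
    reloaded = IPNDMScheduler.from_pretrained(tmpdirname)
reloaded.set_timesteps(50)
assert torch.equal(scheduler.timesteps, reloaded.timesteps)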
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__a :Dict = {'configuration_xglm': ['XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP', 'XGLMConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a :Dict = ['XGLMTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a :str = ['XGLMTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a :Tuple = [
'XGLM_PRETRAINED_MODEL_ARCHIVE_LIST',
'XGLMForCausalLM',
'XGLMModel',
'XGLMPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a :List[Any] = [
'FlaxXGLMForCausalLM',
'FlaxXGLMModel',
'FlaxXGLMPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a :Any = [
'TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFXGLMForCausalLM',
'TFXGLMModel',
'TFXGLMPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
__a :List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure) | 86 | 1 |
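This __init__ registers every XGLM symbol in _import_structure and only materializes the heavy submodules on first attribute access. A stripped-down stand-in for that pattern (a simplified sketch, not transformers' real _LazyModule implementation):

import importlib
import types

class LazyModule(types.ModuleType):
    """Defer submodule imports until an attribute is first accessed."""

    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported symbol back to the submodule that defines it
        self._symbol_to_module = {
            symbol: module for module, symbols in import_structure.items() for symbol in symbols
        }

    def __getattr__(self, attr):
        if attr not in self._symbol_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        module = importlib.import_module("." + self._symbol_to_module[attr], self.__name__)
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache so later lookups skip __getattr__
        return value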
import os
from typing import List, Optional, Union
from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer
class _a ( snake_case_ ):
"""simple docstring"""
_lowerCamelCase : List[str] = ['image_processor', 'tokenizer']
_lowerCamelCase : Dict = 'BlipImageProcessor'
_lowerCamelCase : Optional[int] = 'AutoTokenizer'
def __init__( self : Dict , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : int , UpperCAmelCase : Tuple ):
super().__init__(UpperCAmelCase , UpperCAmelCase )
# add QFormer tokenizer
A_ = qformer_tokenizer
def __call__( self : Optional[Any] , UpperCAmelCase : ImageInput = None , UpperCAmelCase : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , UpperCAmelCase : bool = True , UpperCAmelCase : Union[bool, str, PaddingStrategy] = False , UpperCAmelCase : Union[bool, str, TruncationStrategy] = None , UpperCAmelCase : Optional[int] = None , UpperCAmelCase : int = 0 , UpperCAmelCase : Optional[int] = None , UpperCAmelCase : Optional[bool] = None , UpperCAmelCase : bool = False , UpperCAmelCase : bool = False , UpperCAmelCase : bool = False , UpperCAmelCase : bool = False , UpperCAmelCase : bool = False , UpperCAmelCase : bool = True , UpperCAmelCase : Optional[Union[str, TensorType]] = None , **UpperCAmelCase : Tuple , ):
if images is None and text is None:
raise ValueError("You have to specify at least images or text." )
A_ = BatchFeature()
if text is not None:
A_ = self.tokenizer(
text=UpperCAmelCase , add_special_tokens=UpperCAmelCase , padding=UpperCAmelCase , truncation=UpperCAmelCase , max_length=UpperCAmelCase , stride=UpperCAmelCase , pad_to_multiple_of=UpperCAmelCase , return_attention_mask=UpperCAmelCase , return_overflowing_tokens=UpperCAmelCase , return_special_tokens_mask=UpperCAmelCase , return_offsets_mapping=UpperCAmelCase , return_token_type_ids=UpperCAmelCase , return_length=UpperCAmelCase , verbose=UpperCAmelCase , return_tensors=UpperCAmelCase , **UpperCAmelCase , )
encoding.update(UpperCAmelCase )
A_ = self.qformer_tokenizer(
text=UpperCAmelCase , add_special_tokens=UpperCAmelCase , padding=UpperCAmelCase , truncation=UpperCAmelCase , max_length=UpperCAmelCase , stride=UpperCAmelCase , pad_to_multiple_of=UpperCAmelCase , return_attention_mask=UpperCAmelCase , return_overflowing_tokens=UpperCAmelCase , return_special_tokens_mask=UpperCAmelCase , return_offsets_mapping=UpperCAmelCase , return_token_type_ids=UpperCAmelCase , return_length=UpperCAmelCase , verbose=UpperCAmelCase , return_tensors=UpperCAmelCase , **UpperCAmelCase , )
A_ = qformer_text_encoding.pop("input_ids" )
A_ = qformer_text_encoding.pop("attention_mask" )
if images is not None:
A_ = self.image_processor(UpperCAmelCase , return_tensors=UpperCAmelCase )
encoding.update(UpperCAmelCase )
return encoding
def __A ( self : Dict , *UpperCAmelCase : Any , **UpperCAmelCase : int ):
return self.tokenizer.batch_decode(*UpperCAmelCase , **UpperCAmelCase )
def __A ( self : Dict , *UpperCAmelCase : List[Any] , **UpperCAmelCase : Tuple ):
return self.tokenizer.decode(*UpperCAmelCase , **UpperCAmelCase )
@property
# Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
def __A ( self : Any ):
A_ = self.tokenizer.model_input_names
A_ = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
def __A ( self : Tuple , UpperCAmelCase : Union[str, Any] , **UpperCAmelCase : List[str] ):
if os.path.isfile(UpperCAmelCase ):
raise ValueError(f'''Provided path ({save_directory}) should be a directory, not a file''' )
os.makedirs(UpperCAmelCase , exist_ok=UpperCAmelCase )
A_ = os.path.join(UpperCAmelCase , "qformer_tokenizer" )
self.qformer_tokenizer.save_pretrained(UpperCAmelCase )
return super().save_pretrained(UpperCAmelCase , **UpperCAmelCase )
@classmethod
def __A ( cls : Optional[int] , UpperCAmelCase : Tuple , **UpperCAmelCase : Union[str, Any] ):
A_ = AutoTokenizer.from_pretrained(UpperCAmelCase , subfolder="qformer_tokenizer" )
A_ = cls._get_arguments_from_pretrained(UpperCAmelCase , **UpperCAmelCase )
args.append(UpperCAmelCase )
return cls(*UpperCAmelCase ) | 86 |
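The processor's __call__ merges three encoders into one BatchFeature: the main tokenizer's keys stay as-is, the Q-Former tokenizer's keys are re-prefixed so the two cannot collide, and the image processor's keys are added last. The key-renaming step in miniature, with toy token ids but the real BatchFeature container:

from transformers import BatchFeature

encoding = BatchFeature()
# main tokenizer output keeps its standard keys
encoding.update({"input_ids": [[101, 7592, 102]], "attention_mask": [[1, 1, 1]]})
# Q-Former tokenizer output is stored under prefixed keys to avoid clashes
qformer_encoding = {"input_ids": [[101, 102]], "attention_mask": [[1, 1]]}
encoding["qformer_input_ids"] = qformer_encoding.pop("input_ids")
encoding["qformer_attention_mask"] = qformer_encoding.pop("attention_mask")
print(sorted(encoding.keys()))
# ['attention_mask', 'input_ids', 'qformer_attention_mask', 'qformer_input_ids']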
# Usage:
# ./gen-card-facebook-wmt19.py
import os
from pathlib import Path
def __snake_case ( __UpperCamelCase : str ,__UpperCamelCase : str ,__UpperCamelCase : List[str] ):
"""simple docstring"""
A_ = {
"en": "Machine learning is great, isn't it?",
"ru": "Машинное обучение - это здорово, не так ли?",
"de": "Maschinelles Lernen ist großartig, oder?",
}
# BLEU scores as follows:
# "pair": [fairseq, transformers]
A_ = {
"ru-en": ["[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)", "39.20"],
"en-ru": ["[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)", "33.47"],
"en-de": ["[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)", "42.83"],
"de-en": ["[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)", "41.35"],
}
A_ = f'''{src_lang}-{tgt_lang}'''
A_ = f'''
---
language:
- {src_lang}
- {tgt_lang}
thumbnail:
tags:
- translation
- wmt19
- facebook
license: apache-2.0
datasets:
- wmt19
metrics:
- bleu
---
# FSMT
## Model description
This is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.
For more details, please see, [Facebook FAIR\'s WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).
The abbreviation FSMT stands for FairSeqMachineTranslation.
All four models are available:
* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)
* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)
* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)
* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)
## Intended uses & limitations
#### How to use
```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = "facebook/wmt19-{src_lang}-{tgt_lang}"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)
input = "{texts[src_lang]}"
input_ids = tokenizer.encode(input, return_tensors="pt")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded) # {texts[tgt_lang]}
```
#### Limitations and bias
- The original (and this ported model) doesn\'t seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)
## Training data
Pretrained weights were left identical to the original model released by fairseq. For more details, please see the [paper](https://arxiv.org/abs/1907.06616).
## Eval results
pair | fairseq | transformers
-------|---------|----------
{pair} | {scores[pair][0]} | {scores[pair][1]}
The score is slightly below the score reported by `fairseq`, since `transformers` currently doesn\'t support:
- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).
- re-ranking
The score was calculated using this code:
```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR={pair}
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=15
mkdir -p $DATA_DIR
sacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```
note: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.
## Data Sources
- [training, etc.](http://www.statmt.org/wmt19/)
- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)
### BibTeX entry and citation info
```bibtex
@inproceedings{{...,
year={{2020}},
title={{Facebook FAIR\'s WMT19 News Translation Task Submission}},
author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},
booktitle={{Proc. of WMT}},
}}
```
## TODO
- port model ensemble (fairseq uses 4 model checkpoints)
'''
os.makedirs(__UpperCamelCase ,exist_ok=__UpperCamelCase )
A_ = os.path.join(__UpperCamelCase ,"README.md" )
print(f'''Generating {path}''' )
with open(__UpperCamelCase ,"w" ,encoding="utf-8" ) as f:
f.write(__UpperCamelCase )
# make sure we are under the root of the project
__a :Optional[Any] = Path(__file__).resolve().parent.parent.parent
__a :Optional[Any] = repo_dir / 'model_cards'
for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
__a , __a , __a :int = model_name.split('-')
__a :str = model_cards_dir / 'facebook' / model_name
write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang) | 86 | 1 |
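The script's core move is plain f-string templating: build the whole card as one string, then write it to <model_cards>/facebook/<model_name>/README.md. A toy version of the same pattern (write_card is my own name, not the script's helper, and the card body is abbreviated):

import tempfile
from pathlib import Path

def write_card(card_dir: Path, src_lang: str, tgt_lang: str) -> None:
    pair = f"{src_lang}-{tgt_lang}"
    content = f"---\nlanguage:\n- {src_lang}\n- {tgt_lang}\n---\n\n# FSMT {pair}\n"
    card_dir.mkdir(parents=True, exist_ok=True)
    (card_dir / "README.md").write_text(content, encoding="utf-8")

with tempfile.TemporaryDirectory() as tmp:
    write_card(Path(tmp) / "facebook" / "wmt19-en-de", "en", "de")
    print((Path(tmp) / "facebook" / "wmt19-en-de" / "README.md").read_text())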
MOD_ADLER = 65_521
def adler32(plain_text: str) -> int:
    """Compute the Adler-32 checksum of an ASCII string."""
    a = 1
    b = 0
    for plain_chr in plain_text:
        a = (a + ord(plain_chr)) % MOD_ADLER
        b = (b + a) % MOD_ADLER
    return (b << 16) | a | 86 |
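Assuming the cleaned-up adler32 above, the result can be cross-checked against the C implementation in the standard library; "Wikipedia" is the classic worked example (a = 920 = 0x398, b = 4582 = 0x11E6):

import zlib

print(adler32("Wikipedia"))        # 300286872 (0x11E60398)
print(zlib.adler32(b"Wikipedia"))  # same value for ASCII input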
from ..utils import DummyObject, requires_backends
class _a ( metaclass=snake_case_ ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = ['torch', 'transformers', 'onnx']
def __init__( self : str , *UpperCAmelCase : int , **UpperCAmelCase : List[Any] ):
requires_backends(self , ["torch", "transformers", "onnx"] )
@classmethod
def __A ( cls : Dict , *UpperCAmelCase : Dict , **UpperCAmelCase : Union[str, Any] ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
@classmethod
def __A ( cls : Optional[Any] , *UpperCAmelCase : List[Any] , **UpperCAmelCase : Union[str, Any] ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
class _a ( metaclass=snake_case_ ):
"""simple docstring"""
_lowerCamelCase : str = ['torch', 'transformers', 'onnx']
def __init__( self : Optional[int] , *UpperCAmelCase : List[str] , **UpperCAmelCase : int ):
requires_backends(self , ["torch", "transformers", "onnx"] )
@classmethod
def __A ( cls : List[str] , *UpperCAmelCase : List[str] , **UpperCAmelCase : List[Any] ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
@classmethod
def __A ( cls : Optional[Any] , *UpperCAmelCase : Dict , **UpperCAmelCase : Optional[Any] ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
class _a ( metaclass=snake_case_ ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = ['torch', 'transformers', 'onnx']
def __init__( self : Union[str, Any] , *UpperCAmelCase : Any , **UpperCAmelCase : Dict ):
requires_backends(self , ["torch", "transformers", "onnx"] )
@classmethod
def __A ( cls : Tuple , *UpperCAmelCase : Dict , **UpperCAmelCase : str ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
@classmethod
def __A ( cls : Dict , *UpperCAmelCase : List[str] , **UpperCAmelCase : List[str] ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
class _a ( metaclass=snake_case_ ):
"""simple docstring"""
_lowerCamelCase : int = ['torch', 'transformers', 'onnx']
def __init__( self : List[str] , *UpperCAmelCase : Dict , **UpperCAmelCase : Optional[Any] ):
requires_backends(self , ["torch", "transformers", "onnx"] )
@classmethod
def __A ( cls : Any , *UpperCAmelCase : Union[str, Any] , **UpperCAmelCase : Dict ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
@classmethod
def __A ( cls : Optional[Any] , *UpperCAmelCase : int , **UpperCAmelCase : Optional[int] ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
class _a ( metaclass=snake_case_ ):
"""simple docstring"""
_lowerCamelCase : Dict = ['torch', 'transformers', 'onnx']
def __init__( self : List[str] , *UpperCAmelCase : str , **UpperCAmelCase : int ):
requires_backends(self , ["torch", "transformers", "onnx"] )
@classmethod
def __A ( cls : List[str] , *UpperCAmelCase : Optional[int] , **UpperCAmelCase : Optional[int] ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
@classmethod
def __A ( cls : Optional[int] , *UpperCAmelCase : int , **UpperCAmelCase : List[Any] ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
class _a ( metaclass=snake_case_ ):
"""simple docstring"""
_lowerCamelCase : int = ['torch', 'transformers', 'onnx']
def __init__( self : Tuple , *UpperCAmelCase : List[Any] , **UpperCAmelCase : Optional[Any] ):
requires_backends(self , ["torch", "transformers", "onnx"] )
@classmethod
def __A ( cls : Optional[int] , *UpperCAmelCase : Dict , **UpperCAmelCase : str ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
@classmethod
def __A ( cls : Optional[int] , *UpperCAmelCase : Optional[Any] , **UpperCAmelCase : int ):
requires_backends(cls , ["torch", "transformers", "onnx"] ) | 86 | 1 |
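Each dummy class exists so that importing the symbol still succeeds when torch/transformers/onnx are absent, deferring the failure to instantiation. A simplified stand-in for requires_backends (the real diffusers utility also checks versions and emits richer install hints):

import importlib.util

def requires_backends(obj, backends):
    """Raise ImportError at use time if any required backend is absent."""
    name = obj.__name__ if hasattr(obj, "__name__") else obj.__class__.__name__
    missing = [b for b in backends if importlib.util.find_spec(b) is None]
    if missing:
        raise ImportError(f"{name} requires the missing backends: {', '.join(missing)}")

class OnnxThing:
    def __init__(self):
        requires_backends(self, ["torch", "transformers", "onnx"])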
from sklearn.metrics import matthews_corrcoef
import datasets
__a :str = '\nCompute the Matthews correlation coefficient (MCC)\n\nThe Matthews correlation coefficient is used in machine learning as a\nmeasure of the quality of binary and multiclass classifications. It takes\ninto account true and false positives and negatives and is generally\nregarded as a balanced measure which can be used even if the classes are of\nvery different sizes. The MCC is in essence a correlation coefficient value\nbetween -1 and +1. A coefficient of +1 represents a perfect prediction, 0\nan average random prediction and -1 an inverse prediction. The statistic\nis also known as the phi coefficient. [source: Wikipedia]\n'
__a :Any = '\nArgs:\n predictions (list of int): Predicted labels, as returned by a model.\n references (list of int): Ground truth labels.\n sample_weight (list of int, float, or bool): Sample weights. Defaults to `None`.\nReturns:\n matthews_correlation (dict containing float): Matthews correlation.\nExamples:\n Example 1, a basic example with only predictions and references as inputs:\n >>> matthews_metric = datasets.load_metric("matthews_correlation")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3])\n >>> print(round(results[\'matthews_correlation\'], 2))\n 0.54\n\n Example 2, the same example as above, but also including sample weights:\n >>> matthews_metric = datasets.load_metric("matthews_correlation")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3],\n ... sample_weight=[0.5, 3, 1, 1, 1, 2])\n >>> print(round(results[\'matthews_correlation\'], 2))\n 0.1\n\n Example 3, the same example as above, but with sample weights that cause a negative correlation:\n >>> matthews_metric = datasets.load_metric("matthews_correlation")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3],\n ... sample_weight=[0.5, 1, 0, 0, 0, 1])\n >>> print(round(results[\'matthews_correlation\'], 2))\n -0.25\n'
__a :List[str] = '\\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _a ( datasets.Metric ):
"""simple docstring"""
def __A ( self : Tuple ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("int32" ),
"references": datasets.Value("int32" ),
} ) , reference_urls=[
"https://scikit-learn.org/stable/modules/generated/sklearn.metrics.matthews_corrcoef.html"
] , )
def __A ( self : Union[str, Any] , UpperCAmelCase : Tuple , UpperCAmelCase : Tuple , UpperCAmelCase : Union[str, Any]=None ):
return {
"matthews_correlation": float(matthews_corrcoef(UpperCAmelCase , UpperCAmelCase , sample_weight=UpperCAmelCase ) ),
} | 86 |
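The metric is a thin wrapper over scikit-learn, so the docstring's first example can be reproduced with sklearn directly:

from sklearn.metrics import matthews_corrcoef

refs = [1, 3, 2, 0, 3, 2]
preds = [1, 2, 2, 0, 3, 3]
print(round(matthews_corrcoef(refs, preds), 2))  # 0.54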
import torch
from diffusers import DDPMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class _a ( snake_case_ ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = (DDPMParallelScheduler,)
def __A ( self : List[Any] , **UpperCAmelCase : Optional[int] ):
A_ = {
"num_train_timesteps": 1000,
"beta_start": 0.0_001,
"beta_end": 0.02,
"beta_schedule": "linear",
"variance_type": "fixed_small",
"clip_sample": True,
}
config.update(**UpperCAmelCase )
return config
def __A ( self : Optional[Any] ):
for timesteps in [1, 5, 100, 1000]:
self.check_over_configs(num_train_timesteps=UpperCAmelCase )
def __A ( self : Dict ):
for beta_start, beta_end in zip([0.0_001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=UpperCAmelCase , beta_end=UpperCAmelCase )
def __A ( self : int ):
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=UpperCAmelCase )
def __A ( self : Tuple ):
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=UpperCAmelCase )
def __A ( self : int ):
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=UpperCAmelCase )
def __A ( self : Union[str, Any] ):
self.check_over_configs(thresholding=UpperCAmelCase )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=UpperCAmelCase , prediction_type=UpperCAmelCase , sample_max_value=UpperCAmelCase , )
def __A ( self : Optional[int] ):
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=UpperCAmelCase )
def __A ( self : Tuple ):
for t in [0, 500, 999]:
self.check_over_forward(time_step=UpperCAmelCase )
def __A ( self : Tuple ):
A_ = self.scheduler_classes[0]
A_ = self.get_scheduler_config()
A_ = scheduler_class(**UpperCAmelCase )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.00_979 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.02 ) ) < 1E-5
def __A ( self : List[Any] ):
A_ = self.scheduler_classes[0]
A_ = self.get_scheduler_config()
A_ = scheduler_class(**UpperCAmelCase )
A_ = len(UpperCAmelCase )
A_ = self.dummy_model()
A_ = self.dummy_sample_deter
A_ = self.dummy_sample_deter + 0.1
A_ = self.dummy_sample_deter - 0.1
A_ = samplea.shape[0]
A_ = torch.stack([samplea, samplea, samplea] , dim=0 )
A_ = torch.arange(UpperCAmelCase )[0:3, None].repeat(1 , UpperCAmelCase )
A_ = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) )
A_ = scheduler.batch_step_no_noise(UpperCAmelCase , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) )
A_ = torch.sum(torch.abs(UpperCAmelCase ) )
A_ = torch.mean(torch.abs(UpperCAmelCase ) )
assert abs(result_sum.item() - 1_153.1_833 ) < 1E-2
assert abs(result_mean.item() - 0.5_005 ) < 1E-3
def __A ( self : Tuple ):
A_ = self.scheduler_classes[0]
A_ = self.get_scheduler_config()
A_ = scheduler_class(**UpperCAmelCase )
A_ = len(UpperCAmelCase )
A_ = self.dummy_model()
A_ = self.dummy_sample_deter
A_ = torch.manual_seed(0 )
for t in reversed(range(UpperCAmelCase ) ):
# 1. predict noise residual
A_ = model(UpperCAmelCase , UpperCAmelCase )
# 2. predict previous mean of sample x_t-1
A_ = scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , generator=UpperCAmelCase ).prev_sample
A_ = pred_prev_sample
A_ = torch.sum(torch.abs(UpperCAmelCase ) )
A_ = torch.mean(torch.abs(UpperCAmelCase ) )
assert abs(result_sum.item() - 258.9_606 ) < 1E-2
assert abs(result_mean.item() - 0.3_372 ) < 1E-3
def __A ( self : Tuple ):
A_ = self.scheduler_classes[0]
A_ = self.get_scheduler_config(prediction_type="v_prediction" )
A_ = scheduler_class(**UpperCAmelCase )
A_ = len(UpperCAmelCase )
A_ = self.dummy_model()
A_ = self.dummy_sample_deter
A_ = torch.manual_seed(0 )
for t in reversed(range(UpperCAmelCase ) ):
# 1. predict noise residual
A_ = model(UpperCAmelCase , UpperCAmelCase )
# 2. predict previous mean of sample x_t-1
A_ = scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , generator=UpperCAmelCase ).prev_sample
A_ = pred_prev_sample
A_ = torch.sum(torch.abs(UpperCAmelCase ) )
A_ = torch.mean(torch.abs(UpperCAmelCase ) )
assert abs(result_sum.item() - 202.0_296 ) < 1E-2
assert abs(result_mean.item() - 0.2_631 ) < 1E-3
def __A ( self : Union[str, Any] ):
A_ = self.scheduler_classes[0]
A_ = self.get_scheduler_config()
A_ = scheduler_class(**UpperCAmelCase )
A_ = [100, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=UpperCAmelCase )
A_ = scheduler.timesteps
for i, timestep in enumerate(UpperCAmelCase ):
if i == len(UpperCAmelCase ) - 1:
A_ = -1
else:
A_ = timesteps[i + 1]
A_ = scheduler.previous_timestep(UpperCAmelCase )
A_ = prev_t.item()
self.assertEqual(UpperCAmelCase , UpperCAmelCase )
def __A ( self : List[Any] ):
A_ = self.scheduler_classes[0]
A_ = self.get_scheduler_config()
A_ = scheduler_class(**UpperCAmelCase )
A_ = [100, 87, 50, 51, 0]
with self.assertRaises(UpperCAmelCase , msg="`custom_timesteps` must be in descending order." ):
scheduler.set_timesteps(timesteps=UpperCAmelCase )
def __A ( self : List[Any] ):
A_ = self.scheduler_classes[0]
A_ = self.get_scheduler_config()
A_ = scheduler_class(**UpperCAmelCase )
A_ = [100, 87, 50, 1, 0]
A_ = len(UpperCAmelCase )
with self.assertRaises(UpperCAmelCase , msg="Can only pass one of `num_inference_steps` or `custom_timesteps`." ):
scheduler.set_timesteps(num_inference_steps=UpperCAmelCase , timesteps=UpperCAmelCase )
def __A ( self : Optional[Any] ):
A_ = self.scheduler_classes[0]
A_ = self.get_scheduler_config()
A_ = scheduler_class(**UpperCAmelCase )
A_ = [scheduler.config.num_train_timesteps]
with self.assertRaises(
UpperCAmelCase , msg=f"`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}" , ):
scheduler.set_timesteps(timesteps=UpperCAmelCase ) | 86 | 1 |
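The last three tests pin down the set_timesteps contract: a custom schedule must be strictly descending, must start below num_train_timesteps, and is mutually exclusive with num_inference_steps. In isolation, using only the public API exercised above:

from diffusers import DDPMParallelScheduler

scheduler = DDPMParallelScheduler(num_train_timesteps=1000)
scheduler.set_timesteps(timesteps=[100, 87, 50, 1, 0])
print(scheduler.timesteps)  # tensor([100,  87,  50,   1,   0])
try:
    scheduler.set_timesteps(timesteps=[100, 87, 50, 51, 0])  # not descending
except ValueError as err:
    print(err)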
def abbreviation(a: str, b: str) -> bool:
    """
    Return True if `a` can be turned into `b` by capitalizing some of its
    lowercase letters and deleting all remaining lowercase letters.

    >>> abbreviation("daBcd", "ABC")
    True
    >>> abbreviation("dBcd", "ABC")
    False
    """
    n = len(a)
    m = len(b)
    dp = [[False for _ in range(m + 1)] for _ in range(n + 1)]
    dp[0][0] = True
    for i in range(n):
        for j in range(m + 1):
            if dp[i][j]:
                if j < m and a[i].upper() == b[j]:
                    dp[i + 1][j + 1] = True
                if a[i].islower():
                    dp[i + 1][j] = True
    return dp[n][m]
if __name__ == "__main__":
    import doctest
    doctest.testmod() | 86 |
import argparse
import json
import os
import torch
from transformers import LukeConfig, LukeModel, LukeTokenizer, RobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def __snake_case ( __UpperCamelCase : Tuple ,__UpperCamelCase : Dict ,__UpperCamelCase : Union[str, Any] ,__UpperCamelCase : List[Any] ,__UpperCamelCase : List[Any] ):
"""simple docstring"""
with open(__UpperCamelCase ) as metadata_file:
A_ = json.load(__UpperCamelCase )
A_ = LukeConfig(use_entity_aware_attention=__UpperCamelCase ,**metadata["model_config"] )
# Load in the weights from the checkpoint_path
A_ = torch.load(__UpperCamelCase ,map_location="cpu" )
# Load the entity vocab file
A_ = load_entity_vocab(__UpperCamelCase )
A_ = RobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"] )
# Add special tokens to the token vocabulary for downstream tasks
A_ = AddedToken("<ent>" ,lstrip=__UpperCamelCase ,rstrip=__UpperCamelCase )
A_ = AddedToken("<ent2>" ,lstrip=__UpperCamelCase ,rstrip=__UpperCamelCase )
tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(f'''Saving tokenizer to {pytorch_dump_folder_path}''' )
tokenizer.save_pretrained(__UpperCamelCase )
with open(os.path.join(__UpperCamelCase ,LukeTokenizer.vocab_files_names["entity_vocab_file"] ) ,"w" ) as f:
json.dump(__UpperCamelCase ,__UpperCamelCase )
A_ = LukeTokenizer.from_pretrained(__UpperCamelCase )
# Initialize the embeddings of the special tokens
A_ = state_dict["embeddings.word_embeddings.weight"]
A_ = word_emb[tokenizer.convert_tokens_to_ids(["@"] )[0]].unsqueeze(0 )
A_ = word_emb[tokenizer.convert_tokens_to_ids(["#"] )[0]].unsqueeze(0 )
A_ = torch.cat([word_emb, ent_emb, enta_emb] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
A_ = f'''encoder.layer.{layer_index}.attention.self.'''
A_ = state_dict[prefix + matrix_name]
A_ = state_dict[prefix + matrix_name]
A_ = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
A_ = state_dict["entity_embeddings.entity_embeddings.weight"]
A_ = entity_emb[entity_vocab["[MASK]"]]
A_ = LukeModel(config=__UpperCamelCase ).eval()
A_ , A_ = model.load_state_dict(__UpperCamelCase ,strict=__UpperCamelCase )
if not (len(__UpperCamelCase ) == 1 and missing_keys[0] == "embeddings.position_ids"):
raise ValueError(f'''Missing keys {", ".join(__UpperCamelCase )}. Expected only missing embeddings.position_ids''' )
if not (all(key.startswith("entity_predictions" ) or key.startswith("lm_head" ) for key in unexpected_keys )):
raise ValueError(
"Unexpected keys"
f''' {", ".join([key for key in unexpected_keys if not (key.startswith("entity_predictions" ) or key.startswith("lm_head" ))] )}''' )
# Check outputs
A_ = LukeTokenizer.from_pretrained(__UpperCamelCase ,task="entity_classification" )
A_ = (
"Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped the"
" new world number one avoid a humiliating second- round exit at Wimbledon ."
)
A_ = (39, 42)
A_ = tokenizer(__UpperCamelCase ,entity_spans=[span] ,add_prefix_space=__UpperCamelCase ,return_tensors="pt" )
A_ = model(**__UpperCamelCase )
# Verify word hidden states
if model_size == "large":
A_ = torch.Size((1, 42, 1024) )
A_ = torch.tensor(
[[0.0133, 0.0865, 0.0095], [0.3093, -0.2576, -0.7418], [-0.1720, -0.2117, -0.2869]] )
else: # base
A_ = torch.Size((1, 42, 768) )
A_ = torch.tensor([[0.0037, 0.1368, -0.0091], [0.1099, 0.3329, -0.1095], [0.0765, 0.5335, 0.1179]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
f'''Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}''' )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] ,__UpperCamelCase ,atol=1E-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
A_ = torch.Size((1, 1, 1024) )
A_ = torch.tensor([[0.0466, -0.0106, -0.0179]] )
else: # base
A_ = torch.Size((1, 1, 768) )
A_ = torch.tensor([[0.1457, 0.1044, 0.0174]] )
if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
f'''Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is'''
f''' {expected_shape}''' )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] ,__UpperCamelCase ,atol=1E-4 ):
raise ValueError
# Finally, save our PyTorch model and tokenizer
print("Saving PyTorch model to {}".format(__UpperCamelCase ) )
model.save_pretrained(__UpperCamelCase )
def __snake_case ( __UpperCamelCase : str ):
"""simple docstring"""
A_ = {}
with open(__UpperCamelCase ,"r" ,encoding="utf-8" ) as f:
for index, line in enumerate(__UpperCamelCase ):
A_ , A_ = line.rstrip().split("\t" )
A_ = index
return entity_vocab
if __name__ == "__main__":
__a :Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--checkpoint_path', type=str, help='Path to a pytorch_model.bin file.')
parser.add_argument(
'--metadata_path', default=None, type=str, help='Path to a metadata.json file, defining the configuration.'
)
parser.add_argument(
'--entity_vocab_path',
default=None,
type=str,
help='Path to an entity_vocab.tsv file, containing the entity vocabulary.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to where to dump the output PyTorch model.'
)
parser.add_argument(
'--model_size', default='base', type=str, choices=['base', 'large'], help='Size of the model to be converted.'
)
__a :Tuple = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
) | 86 | 1 |
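The trickiest step in the conversion is growing the word-embedding matrix for the two new entity tokens by reusing the rows of "@" and "#" as their initializations. The same tensor surgery in miniature, with toy sizes and arbitrary stand-in row indices:

import torch

word_emb = torch.randn(10, 4)        # toy vocab of 10 tokens, hidden size 4
ent_emb = word_emb[3].unsqueeze(0)   # stand-in for the "@" row -> <ent>
enta_emb = word_emb[5].unsqueeze(0)  # stand-in for the "#" row -> <ent2>
word_emb = torch.cat([word_emb, ent_emb, enta_emb])
print(word_emb.shape)  # torch.Size([12, 4]) -- vocab grew by the 2 special tokens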
from jiwer import compute_measures
import datasets
__a :List[Any] = '\\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n'
__a :Union[str, Any] = '\\nWord error rate (WER) is a common metric of the performance of an automatic speech recognition system.\n\nThe general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.\n\nThis problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.\n\nWord error rate can then be computed as:\n\nWER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct words,\nN is the number of words in the reference (N=S+D+C).\n\nThis value indicates the average number of errors per reference word. The lower the value, the better the\nperformance of the ASR system with a WER of 0 being a perfect score.\n'
__a :str = '\nCompute WER score of transcribed segments against references.\n\nArgs:\n references: List of references for each speech input.\n predictions: List of transcriptions to score.\n concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.\n\nReturns:\n (float): the word error rate\n\nExamples:\n\n >>> predictions = ["this is the prediction", "there is an other sample"]\n >>> references = ["this is the reference", "there is another one"]\n >>> wer = datasets.load_metric("wer")\n >>> wer_score = wer.compute(predictions=predictions, references=references)\n >>> print(wer_score)\n 0.5\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _a ( datasets.Metric ):
"""simple docstring"""
def __A ( self : Any ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Value("string" , id="sequence" ),
} ) , codebase_urls=["https://github.com/jitsi/jiwer/"] , reference_urls=[
"https://en.wikipedia.org/wiki/Word_error_rate",
] , )
def __A ( self : Dict , UpperCAmelCase : Dict=None , UpperCAmelCase : List[Any]=None , UpperCAmelCase : str=False ):
if concatenate_texts:
return compute_measures(UpperCAmelCase , UpperCAmelCase )["wer"]
else:
A_ = 0
A_ = 0
for prediction, reference in zip(UpperCAmelCase , UpperCAmelCase ):
A_ = compute_measures(UpperCAmelCase , UpperCAmelCase )
incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
total += measures["substitutions"] + measures["deletions"] + measures["hits"]
return incorrect / total | 86 |
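Working the docstring's example through WER = (S + D + I) / N by hand: the first pair has one substitution over four reference words, the second has two substitutions plus one insertion over four, so the pooled rate is (1 + 3) / 8 = 0.5. The same numbers fall out of the non-concatenated branch above, using the older jiwer API this metric imports:

from jiwer import compute_measures

predictions = ["this is the prediction", "there is an other sample"]
references = ["this is the reference", "there is another one"]
incorrect = total = 0
for pred, ref in zip(predictions, references):
    measures = compute_measures(ref, pred)  # truth first, hypothesis second
    incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
    total += measures["substitutions"] + measures["deletions"] + measures["hits"]
print(incorrect / total)  # 0.5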
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
__a :Optional[Any] = 'true'
def __snake_case ( __UpperCamelCase : Any ,__UpperCamelCase : List[Any]=82 ,__UpperCamelCase : Dict=16 ):
"""simple docstring"""
set_seed(42 )
A_ = RegressionModel()
A_ = deepcopy(__UpperCamelCase )
A_ = RegressionDataset(length=__UpperCamelCase )
A_ = DataLoader(__UpperCamelCase ,batch_size=__UpperCamelCase )
model.to(accelerator.device )
A_ , A_ = accelerator.prepare(__UpperCamelCase ,__UpperCamelCase )
return model, ddp_model, dataloader
def __snake_case ( __UpperCamelCase : Accelerator ,__UpperCamelCase : Dict=False ):
"""simple docstring"""
A_ = AutoTokenizer.from_pretrained("hf-internal-testing/mrpc-bert-base-cased" )
A_ = load_dataset("glue" ,"mrpc" ,split="validation" )
def tokenize_function(__UpperCamelCase : Optional[Any] ):
A_ = tokenizer(examples["sentence1"] ,examples["sentence2"] ,truncation=__UpperCamelCase ,max_length=__UpperCamelCase )
return outputs
with accelerator.main_process_first():
A_ = dataset.map(
__UpperCamelCase ,batched=__UpperCamelCase ,remove_columns=["idx", "sentence1", "sentence2"] ,)
A_ = tokenized_datasets.rename_column("label" ,"labels" )
def collate_fn(__UpperCamelCase : Union[str, Any] ):
if use_longest:
return tokenizer.pad(__UpperCamelCase ,padding="longest" ,return_tensors="pt" )
return tokenizer.pad(__UpperCamelCase ,padding="max_length" ,max_length=128 ,return_tensors="pt" )
return DataLoader(__UpperCamelCase ,shuffle=__UpperCamelCase ,collate_fn=__UpperCamelCase ,batch_size=16 )
def __snake_case ( __UpperCamelCase : Optional[Any] ,__UpperCamelCase : str ):
"""simple docstring"""
A_ = Accelerator(dispatch_batches=__UpperCamelCase ,split_batches=__UpperCamelCase )
A_ = get_dataloader(__UpperCamelCase ,not dispatch_batches )
A_ = AutoModelForSequenceClassification.from_pretrained(
"hf-internal-testing/mrpc-bert-base-cased" ,return_dict=__UpperCamelCase )
A_ , A_ = accelerator.prepare(__UpperCamelCase ,__UpperCamelCase )
return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def __snake_case ( __UpperCamelCase : Dict ,__UpperCamelCase : int ,__UpperCamelCase : Optional[Any] ):
"""simple docstring"""
A_ = []
for batch in dataloader:
A_ , A_ = batch.values()
with torch.no_grad():
A_ = model(__UpperCamelCase )
A_ , A_ = accelerator.gather_for_metrics((logit, target) )
logits_and_targets.append((logit, target) )
A_ , A_ = [], []
for logit, targ in logits_and_targets:
logits.append(__UpperCamelCase )
targs.append(__UpperCamelCase )
A_ , A_ = torch.cat(__UpperCamelCase ), torch.cat(__UpperCamelCase )
return logits, targs
def __snake_case ( __UpperCamelCase : Accelerator ,__UpperCamelCase : Dict=82 ,__UpperCamelCase : List[Any]=False ,__UpperCamelCase : Dict=False ,__UpperCamelCase : Optional[int]=16 ):
"""simple docstring"""
A_ , A_ , A_ = get_basic_setup(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase )
A_ , A_ = generate_predictions(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase )
assert (
len(__UpperCamelCase ) == num_samples
), f'''Unexpected number of inputs:\n Expected: {num_samples}\n Actual: {len(__UpperCamelCase )}'''
def __snake_case ( __UpperCamelCase : bool = False ,__UpperCamelCase : bool = False ):
"""simple docstring"""
A_ = evaluate.load("glue" ,"mrpc" )
A_ , A_ = get_mrpc_setup(__UpperCamelCase ,__UpperCamelCase )
# First do baseline
A_ , A_ , A_ = setup["no"]
model.to(__UpperCamelCase )
model.eval()
for batch in dataloader:
batch.to(__UpperCamelCase )
with torch.inference_mode():
A_ = model(**__UpperCamelCase )
A_ = outputs.logits.argmax(dim=-1 )
metric.add_batch(predictions=__UpperCamelCase ,references=batch["labels"] )
A_ = metric.compute()
# Then do distributed
A_ , A_ , A_ = setup["ddp"]
model.eval()
for batch in dataloader:
with torch.inference_mode():
A_ = model(**__UpperCamelCase )
A_ = outputs.logits.argmax(dim=-1 )
A_ = batch["labels"]
A_ , A_ = accelerator.gather_for_metrics((preds, references) )
metric.add_batch(predictions=__UpperCamelCase ,references=__UpperCamelCase )
A_ = metric.compute()
for key in "accuracy f1".split():
assert math.isclose(
baseline[key] ,distributed[key] ), f'''Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n'''
def __snake_case ( ):
"""simple docstring"""
A_ = Accelerator(split_batches=__UpperCamelCase ,dispatch_batches=__UpperCamelCase )
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_warning()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# These are a bit slower so they should only be ran on the GPU or TPU
if torch.cuda.is_available() or is_tpu_available():
if accelerator.is_local_main_process:
print("**Testing gather_for_metrics**" )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
if accelerator.is_local_main_process:
print(f'''With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`''' )
test_mrpc(__UpperCamelCase ,__UpperCamelCase )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print("**Test torch metrics**" )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
A_ = Accelerator(split_batches=__UpperCamelCase ,dispatch_batches=__UpperCamelCase )
if accelerator.is_local_main_process:
print(f'''With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99''' )
test_torch_metrics(__UpperCamelCase ,99 )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print("**Test last batch is not dropped when perfectly divisible**" )
A_ = Accelerator()
test_torch_metrics(__UpperCamelCase ,512 )
accelerator.state._reset_state()
def __snake_case ( __UpperCamelCase : List[Any] ):
"""simple docstring"""
main()
if __name__ == "__main__":
main() | 86 | 1 |
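The pattern under test: each process runs its shard of the dataloader, and gather_for_metrics collects per-batch tensors across processes while dropping the samples that prepare() duplicated to even out the last batch. A minimal standalone version of that loop (a sketch that also runs unchanged on a single process):

import torch
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator

accelerator = Accelerator()
model = torch.nn.Linear(1, 1)
dataset = TensorDataset(torch.randn(10, 1), torch.randn(10, 1))
dataloader = DataLoader(dataset, batch_size=4)
model, dataloader = accelerator.prepare(model, dataloader)

all_preds, all_targets = [], []
for x, y in dataloader:
    with torch.no_grad():
        logits = model(x)
    logits, y = accelerator.gather_for_metrics((logits, y))
    all_preds.append(logits)
    all_targets.append(y)
preds = torch.cat(all_preds)
targets = torch.cat(all_targets)
print(preds.shape)  # torch.Size([10, 1]) -- padded duplicates were dropped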
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor
class _a ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : Tuple , UpperCAmelCase : Any , UpperCAmelCase : List[Any]=7 , UpperCAmelCase : Optional[Any]=3 , UpperCAmelCase : Union[str, Any]=18 , UpperCAmelCase : Optional[Any]=30 , UpperCAmelCase : Optional[Any]=400 , UpperCAmelCase : Optional[int]=True , UpperCAmelCase : List[str]=None , UpperCAmelCase : Optional[int]=True , UpperCAmelCase : str=None , UpperCAmelCase : List[Any]=True , UpperCAmelCase : Tuple=[0.48_145_466, 0.4_578_275, 0.40_821_073] , UpperCAmelCase : str=[0.26_862_954, 0.26_130_258, 0.27_577_711] , UpperCAmelCase : int=True , ):
A_ = size if size is not None else {"height": 224, "width": 224}
A_ = crop_size if crop_size is not None else {"height": 18, "width": 18}
A_ = parent
A_ = batch_size
A_ = num_channels
A_ = image_size
A_ = min_resolution
A_ = max_resolution
A_ = do_resize
A_ = size
A_ = do_center_crop
A_ = crop_size
A_ = do_normalize
A_ = image_mean
A_ = image_std
A_ = do_convert_rgb
def __A ( self : Tuple ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_convert_rgb": self.do_convert_rgb,
}
def __A ( self : List[str] , UpperCAmelCase : Union[str, Any]=False , UpperCAmelCase : int=False , UpperCAmelCase : List[Any]=False ):
assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time"
if equal_resolution:
A_ = []
for i in range(self.batch_size ):
image_inputs.append(
np.random.randint(
255 , size=(self.num_channels, self.max_resolution, self.max_resolution) , dtype=np.uinta ) )
else:
A_ = []
for i in range(self.batch_size ):
A_ , A_ = np.random.choice(np.arange(self.min_resolution , self.max_resolution ) , 2 )
image_inputs.append(np.random.randint(255 , size=(self.num_channels, width, height) , dtype=np.uinta ) )
if not numpify and not torchify:
# PIL expects the channel dimension as last dimension
A_ = [Image.fromarray(np.moveaxis(UpperCAmelCase , 0 , -1 ) ) for x in image_inputs]
if torchify:
A_ = [torch.from_numpy(UpperCAmelCase ) for x in image_inputs]
return image_inputs
@require_torch
@require_vision
class _a ( snake_case_ , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase : List[Any] = ChineseCLIPImageProcessor if is_vision_available() else None
def __A ( self : int ):
A_ = ChineseCLIPImageProcessingTester(self , do_center_crop=UpperCAmelCase )
@property
def __A ( self : Dict ):
return self.image_processor_tester.prepare_image_processor_dict()
def __A ( self : str ):
A_ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCAmelCase , "do_resize" ) )
self.assertTrue(hasattr(UpperCAmelCase , "size" ) )
self.assertTrue(hasattr(UpperCAmelCase , "do_center_crop" ) )
self.assertTrue(hasattr(UpperCAmelCase , "center_crop" ) )
self.assertTrue(hasattr(UpperCAmelCase , "do_normalize" ) )
self.assertTrue(hasattr(UpperCAmelCase , "image_mean" ) )
self.assertTrue(hasattr(UpperCAmelCase , "image_std" ) )
self.assertTrue(hasattr(UpperCAmelCase , "do_convert_rgb" ) )
def __A ( self : str ):
A_ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"height": 224, "width": 224} )
self.assertEqual(image_processor.crop_size , {"height": 18, "width": 18} )
A_ = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {"shortest_edge": 42} )
self.assertEqual(image_processor.crop_size , {"height": 84, "width": 84} )
def __A ( self : str ):
pass
def __A ( self : Optional[int] ):
# Initialize image_processing
A_ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
A_ = self.image_processor_tester.prepare_inputs(equal_resolution=UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase , Image.Image )
# Test not batched input
A_ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
A_ = image_processing(UpperCAmelCase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def __A ( self : Optional[int] ):
# Initialize image_processing
A_ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
A_ = self.image_processor_tester.prepare_inputs(equal_resolution=UpperCAmelCase , numpify=UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase , np.ndarray )
# Test not batched input
A_ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
A_ = image_processing(UpperCAmelCase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def __A ( self : Optional[Any] ):
# Initialize image_processing
A_ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
A_ = self.image_processor_tester.prepare_inputs(equal_resolution=UpperCAmelCase , torchify=UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase , torch.Tensor )
# Test not batched input
A_ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
A_ = image_processing(UpperCAmelCase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
@require_torch
@require_vision
class _a ( snake_case_ , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = ChineseCLIPImageProcessor if is_vision_available() else None
def __A ( self : List[str] ):
A_ = ChineseCLIPImageProcessingTester(self , num_channels=4 , do_center_crop=UpperCAmelCase )
A_ = 3
@property
def __A ( self : List[str] ):
return self.image_processor_tester.prepare_image_processor_dict()
def __A ( self : Optional[int] ):
A_ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCAmelCase , "do_resize" ) )
self.assertTrue(hasattr(UpperCAmelCase , "size" ) )
self.assertTrue(hasattr(UpperCAmelCase , "do_center_crop" ) )
self.assertTrue(hasattr(UpperCAmelCase , "center_crop" ) )
self.assertTrue(hasattr(UpperCAmelCase , "do_normalize" ) )
self.assertTrue(hasattr(UpperCAmelCase , "image_mean" ) )
self.assertTrue(hasattr(UpperCAmelCase , "image_std" ) )
self.assertTrue(hasattr(UpperCAmelCase , "do_convert_rgb" ) )
def __A ( self : List[str] ):
pass
def __A ( self : Any ):
# Initialize image_processing
A_ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
A_ = self.image_processor_tester.prepare_inputs(equal_resolution=UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase , Image.Image )
# Test not batched input
A_ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.expected_encoded_image_num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
A_ = image_processing(UpperCAmelCase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.expected_encoded_image_num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , ) | 86 |
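End to end, the processor turns arbitrary-size inputs into fixed-size normalized tensors: resize to the shortest edge, center-crop, rescale, normalize, and (as the second test class exercises for 4-channel inputs) optionally convert to RGB. A sketch with the default configuration, assuming the stock 224-pixel size and crop:

import numpy as np
from transformers import ChineseCLIPImageProcessor

image_processor = ChineseCLIPImageProcessor()
image = np.random.randint(0, 256, size=(480, 640, 3), dtype=np.uint8)  # H x W x C
inputs = image_processor(image, return_tensors="pt")
print(inputs.pixel_values.shape)  # torch.Size([1, 3, 224, 224]) with the default crop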
import argparse
import os
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
__a :Optional[Any] = 'src/transformers'
__a :Tuple = 'docs/source/en/tasks'
def __snake_case ( __UpperCamelCase : List[Any] ,__UpperCamelCase : Dict ,__UpperCamelCase : int ):
"""simple docstring"""
with open(__UpperCamelCase ,"r" ,encoding="utf-8" ,newline="\n" ) as f:
A_ = f.readlines()
# Find the start prompt.
A_ = 0
while not lines[start_index].startswith(__UpperCamelCase ):
start_index += 1
start_index += 1
A_ = start_index
while not lines[end_index].startswith(__UpperCamelCase ):
end_index += 1
end_index -= 1
while len(lines[start_index] ) <= 1:
start_index += 1
while len(lines[end_index] ) <= 1:
end_index -= 1
end_index += 1
return "".join(lines[start_index:end_index] ), start_index, end_index, lines
# This is to make sure the transformers module imported is the one in the repo.
__a :List[str] = direct_transformers_import(TRANSFORMERS_PATH)
__a :Optional[Any] = {
'asr.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
'audio_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
'language_modeling.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
'image_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
'masked_language_modeling.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
'multiple_choice.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
'object_detection.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
'question_answering.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
'semantic_segmentation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
'sequence_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
'summarization.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'token_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
'translation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'video_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
'document_question_answering.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
'monocular_depth_estimation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}
# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
__a :Optional[Any] = {
'summarization.md': ('nllb',),
'translation.md': ('nllb',),
}
def __snake_case ( __UpperCamelCase : Tuple ):
"""simple docstring"""
A_ = TASK_GUIDE_TO_MODELS[task_guide]
A_ = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(__UpperCamelCase ,set() )
A_ = {
code: name
for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
if (code in model_maping_names or code in special_model_types)
}
return ", ".join([f'''[{name}](../model_doc/{code})''' for code, name in model_names.items()] ) + "\n"
def __snake_case ( __UpperCamelCase : str ,__UpperCamelCase : List[str]=False ):
"""simple docstring"""
A_ , A_ , A_ , A_ = _find_text_in_file(
filename=os.path.join(__UpperCamelCase ,__UpperCamelCase ) ,start_prompt="<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->" ,end_prompt="<!--End of the generated tip-->" ,)
A_ = get_model_list_for_task(__UpperCamelCase )
if current_list != new_list:
if overwrite:
with open(os.path.join(__UpperCamelCase ,__UpperCamelCase ) ,"w" ,encoding="utf-8" ,newline="\n" ) as f:
f.writelines(lines[:start_index] + [new_list] + lines[end_index:] )
else:
raise ValueError(
f'''The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`'''
" to fix this." )
if __name__ == "__main__":
__a :int = argparse.ArgumentParser()
parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
__a :Optional[Any] = parser.parse_args()
for task_guide in TASK_GUIDE_TO_MODELS.keys():
check_model_list_for_task(task_guide, args.fix_and_overwrite) | 86 | 1 |
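# Editor's sketch (hedged): a minimal, de-obfuscated version of the
# prompt-delimited block search the checker above relies on. The name
# `find_block` and the simplified blank-line handling are mine, not the repo's.
def find_block(path: str, start_prompt: str, end_prompt: str):
    with open(path, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    start = 0
    while not lines[start].startswith(start_prompt):
        start += 1
    start += 1  # the block begins just after the start marker
    end = start
    while not lines[end].startswith(end_prompt):
        end += 1
    return "".join(lines[start:end]), start, end, lines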
from __future__ import annotations
from typing import Any
class _a :
"""simple docstring"""
def __init__( self : Optional[int] , UpperCAmelCase : int , UpperCAmelCase : int , UpperCAmelCase : float = 0 ):
A_ , A_ = row, column
A_ = [[default_value for c in range(UpperCAmelCase )] for r in range(UpperCAmelCase )]
def __str__( self : Tuple ):
A_ = f'''Matrix consist of {self.row} rows and {self.column} columns\n'''
# Make string identifier
A_ = 0
for row_vector in self.array:
for obj in row_vector:
A_ = max(UpperCAmelCase , len(str(UpperCAmelCase ) ) )
A_ = f'''%{max_element_length}s'''
# Make string and return
def single_line(UpperCAmelCase : list[float] ) -> str:
nonlocal string_format_identifier
A_ = "["
line += ", ".join(string_format_identifier % (obj,) for obj in row_vector )
line += "]"
return line
s += "\n".join(single_line(UpperCAmelCase ) for row_vector in self.array )
return s
def __repr__( self : Union[str, Any] ):
return str(self )
def __A ( self : List[Any] , UpperCAmelCase : tuple[int, int] ):
if not (isinstance(UpperCAmelCase , (list, tuple) ) and len(UpperCAmelCase ) == 2):
return False
elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
return False
else:
return True
def __getitem__( self : Optional[int] , UpperCAmelCase : tuple[int, int] ):
assert self.validate_indicies(UpperCAmelCase )
return self.array[loc[0]][loc[1]]
def __setitem__( self : str , UpperCAmelCase : tuple[int, int] , UpperCAmelCase : float ):
assert self.validate_indicies(UpperCAmelCase )
A_ = value
def __add__( self : Tuple , UpperCAmelCase : Matrix ):
assert isinstance(UpperCAmelCase , UpperCAmelCase )
assert self.row == another.row and self.column == another.column
# Add
A_ = Matrix(self.row , self.column )
for r in range(self.row ):
for c in range(self.column ):
A_ = self[r, c] + another[r, c]
return result
def __neg__( self : int ):
A_ = Matrix(self.row , self.column )
for r in range(self.row ):
for c in range(self.column ):
A_ = -self[r, c]
return result
def __sub__( self : Union[str, Any] , UpperCAmelCase : Matrix ):
return self + (-another)
def __mul__( self : Tuple , UpperCAmelCase : int | float | Matrix ):
if isinstance(UpperCAmelCase , (int, float) ): # Scalar multiplication
A_ = Matrix(self.row , self.column )
for r in range(self.row ):
for c in range(self.column ):
A_ = self[r, c] * another
return result
elif isinstance(UpperCAmelCase , UpperCAmelCase ): # Matrix multiplication
assert self.column == another.row
A_ = Matrix(self.row , another.column )
for r in range(self.row ):
for c in range(another.column ):
for i in range(self.column ):
result[r, c] += self[r, i] * another[i, c]
return result
else:
A_ = f'''Unsupported type given for another ({type(UpperCAmelCase )})'''
raise TypeError(UpperCAmelCase )
def __A ( self : Optional[int] ):
A_ = Matrix(self.column , self.row )
for r in range(self.row ):
for c in range(self.column ):
A_ = self[r, c]
return result
def __A ( self : List[Any] , UpperCAmelCase : Matrix , UpperCAmelCase : Matrix ):
assert isinstance(UpperCAmelCase , UpperCAmelCase ) and isinstance(UpperCAmelCase , UpperCAmelCase )
assert self.row == self.column == u.row == v.row # u, v should be column vector
assert u.column == v.column == 1 # u, v should be column vector
# Calculate
A_ = v.transpose()
A_ = (v_t * self * u)[0, 0] + 1
if numerator_factor == 0:
return None # It's not invertible
return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))
# Testing
if __name__ == "__main__":
def __snake_case ( ):
"""simple docstring"""
A_ = Matrix(3 ,3 ,0 )
for i in range(3 ):
A_ = 1
print(f'''a^(-1) is {ainv}''' )
# u, v
A_ = Matrix(3 ,1 ,0 )
A_ , A_ , A_ = 1, 2, -3
A_ = Matrix(3 ,1 ,0 )
A_ , A_ , A_ = 4, -2, 5
print(f'''u is {u}''' )
print(f'''v is {v}''' )
print(f'''uv^T is {u * v.transpose()}''' )
# Sherman Morrison
print(f'''(a + uv^T)^(-1) is {ainv.sherman_morrison(__UpperCamelCase ,__UpperCamelCase )}''' )
def __snake_case ( ):
"""simple docstring"""
import doctest
doctest.testmod()
testa() | 86 |
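# Editor's sketch (hedged): the `sherman_morrison` method above treats `self`
# as an already-known inverse A^-1 and applies the identity
# (A + u v^T)^-1 = A^-1 - (A^-1 u v^T A^-1) / (1 + v^T A^-1 u).
# A NumPy verification of that identity; everything here is my own code.
import numpy as np

rng = np.random.default_rng(0)
A = np.eye(3) + 0.1 * rng.standard_normal((3, 3))
u = rng.standard_normal((3, 1))
v = rng.standard_normal((3, 1))

A_inv = np.linalg.inv(A)
denom = 1.0 + (v.T @ A_inv @ u).item()  # must be nonzero for the update to exist
update = A_inv - (A_inv @ u @ v.T @ A_inv) / denom
assert np.allclose(update, np.linalg.inv(A + u @ v.T))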
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
__a :Dict = logging.get_logger(__name__)
def __snake_case ( __UpperCamelCase : Dict ,__UpperCamelCase : Tuple=False ):
"""simple docstring"""
A_ = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'''blocks.{i}.norm1.weight''', f'''vit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((f'''blocks.{i}.norm1.bias''', f'''vit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append((f'''blocks.{i}.attn.proj.weight''', f'''vit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((f'''blocks.{i}.attn.proj.bias''', f'''vit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((f'''blocks.{i}.norm2.weight''', f'''vit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((f'''blocks.{i}.norm2.bias''', f'''vit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((f'''blocks.{i}.mlp.fc1.weight''', f'''vit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((f'''blocks.{i}.mlp.fc1.bias''', f'''vit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((f'''blocks.{i}.mlp.fc2.weight''', f'''vit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((f'''blocks.{i}.mlp.fc2.bias''', f'''vit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
("cls_token", "vit.embeddings.cls_token"),
("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
("pos_embed", "vit.embeddings.position_embeddings"),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
("pre_logits.fc.weight", "pooler.dense.weight"),
("pre_logits.fc.bias", "pooler.dense.bias"),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
A_ = [(pair[0], pair[1][4:]) if pair[1].startswith("vit" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("norm.weight", "vit.layernorm.weight"),
("norm.bias", "vit.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
return rename_keys
def __snake_case ( __UpperCamelCase : Optional[int] ,__UpperCamelCase : Tuple ,__UpperCamelCase : Any=False ):
"""simple docstring"""
for i in range(config.num_hidden_layers ):
if base_model:
A_ = ""
else:
A_ = "vit."
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
A_ = state_dict.pop(f'''blocks.{i}.attn.qkv.weight''' )
A_ = state_dict.pop(f'''blocks.{i}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
A_ = in_proj_weight[
: config.hidden_size, :
]
A_ = in_proj_bias[: config.hidden_size]
A_ = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
A_ = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
A_ = in_proj_weight[
-config.hidden_size :, :
]
A_ = in_proj_bias[-config.hidden_size :]
def __snake_case ( __UpperCamelCase : List[Any] ):
"""simple docstring"""
A_ = ["head.weight", "head.bias"]
for k in ignore_keys:
state_dict.pop(__UpperCamelCase ,__UpperCamelCase )
def __snake_case ( __UpperCamelCase : Any ,__UpperCamelCase : Optional[int] ,__UpperCamelCase : List[str] ):
"""simple docstring"""
A_ = dct.pop(__UpperCamelCase )
A_ = val
def __snake_case ( ):
"""simple docstring"""
A_ = "http://images.cocodataset.org/val2017/000000039769.jpg"
A_ = Image.open(requests.get(__UpperCamelCase ,stream=__UpperCamelCase ).raw )
return im
@torch.no_grad()
def __snake_case ( __UpperCamelCase : Union[str, Any] ,__UpperCamelCase : Optional[int] ):
"""simple docstring"""
A_ = ViTConfig()
A_ = False
# dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
if vit_name[-5:] == "in21k":
A_ = True
A_ = int(vit_name[-12:-10] )
A_ = int(vit_name[-9:-6] )
else:
A_ = 1000
A_ = "huggingface/label-files"
A_ = "imagenet-1k-id2label.json"
A_ = json.load(open(hf_hub_download(__UpperCamelCase ,__UpperCamelCase ,repo_type="dataset" ) ,"r" ) )
A_ = {int(__UpperCamelCase ): v for k, v in idalabel.items()}
A_ = idalabel
A_ = {v: k for k, v in idalabel.items()}
A_ = int(vit_name[-6:-4] )
A_ = int(vit_name[-3:] )
# size of the architecture
if "deit" in vit_name:
if vit_name[9:].startswith("tiny" ):
A_ = 192
A_ = 768
A_ = 12
A_ = 3
elif vit_name[9:].startswith("small" ):
A_ = 384
A_ = 1536
A_ = 12
A_ = 6
else:
pass
else:
if vit_name[4:].startswith("small" ):
A_ = 768
A_ = 2304
A_ = 8
A_ = 8
elif vit_name[4:].startswith("base" ):
pass
elif vit_name[4:].startswith("large" ):
A_ = 1024
A_ = 4096
A_ = 24
A_ = 16
elif vit_name[4:].startswith("huge" ):
A_ = 1280
A_ = 5120
A_ = 32
A_ = 16
# load original model from timm
A_ = timm.create_model(__UpperCamelCase ,pretrained=__UpperCamelCase )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
A_ = timm_model.state_dict()
if base_model:
remove_classification_head_(__UpperCamelCase )
A_ = create_rename_keys(__UpperCamelCase ,__UpperCamelCase )
for src, dest in rename_keys:
rename_key(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase )
read_in_q_k_v(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase )
# load HuggingFace model
if vit_name[-5:] == "in21k":
A_ = ViTModel(__UpperCamelCase ).eval()
else:
A_ = ViTForImageClassification(__UpperCamelCase ).eval()
model.load_state_dict(__UpperCamelCase )
# Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor
if "deit" in vit_name:
A_ = DeiTImageProcessor(size=config.image_size )
else:
A_ = ViTImageProcessor(size=config.image_size )
A_ = image_processor(images=prepare_img() ,return_tensors="pt" )
A_ = encoding["pixel_values"]
A_ = model(__UpperCamelCase )
if base_model:
A_ = timm_model.forward_features(__UpperCamelCase )
assert timm_pooled_output.shape == outputs.pooler_output.shape
assert torch.allclose(__UpperCamelCase ,outputs.pooler_output ,atol=1E-3 )
else:
A_ = timm_model(__UpperCamelCase )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(__UpperCamelCase ,outputs.logits ,atol=1E-3 )
Path(__UpperCamelCase ).mkdir(exist_ok=__UpperCamelCase )
print(f'''Saving model {vit_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(__UpperCamelCase )
print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(__UpperCamelCase )
if __name__ == "__main__":
__a :str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--vit_name',
default='vit_base_patch16_224',
type=str,
help='Name of the ViT timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
__a :Optional[int] = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path) | 86 | 1 |
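# Editor's sketch (hedged): `read_in_q_k_v` above splits timm's fused
# attention projection (one (3*hidden, hidden) matrix) into separate
# query/key/value weights. A toy illustration of the slicing; the dimension
# is mine.
import torch

hidden = 8
qkv_weight = torch.randn(3 * hidden, hidden)  # fused, as in timm checkpoints
q_w = qkv_weight[:hidden, :]
k_w = qkv_weight[hidden : 2 * hidden, :]
v_w = qkv_weight[-hidden:, :]
assert torch.equal(torch.cat([q_w, k_w, v_w], dim=0), qkv_weight)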
import numpy as np
import skfuzzy as fuzz
if __name__ == "__main__":
# Create universe of discourse in Python using linspace ()
__a :List[Any] = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)
# Create two fuzzy sets by defining any membership function
# (trapmf(), gbellmf(), gaussmf(), etc).
__a :Union[str, Any] = [0, 25, 50]
__a :Any = [25, 50, 75]
__a :Tuple = fuzz.membership.trimf(X, abca)
__a :List[Any] = fuzz.membership.trimf(X, abca)
# Compute the different operations using inbuilt functions.
__a :Union[str, Any] = np.ones(75)
__a :Tuple = np.zeros((75,))
# 1. Union = max(µA(x), µB(x))
__a :Optional[Any] = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
# 2. Intersection = min(µA(x), µB(x))
__a :Optional[int] = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
# 3. Complement (A) = 1 - µA(x)
__a :List[str] = fuzz.fuzzy_not(young)
# 4. Difference (A/B) = min(µA(x),(1- µB(x)))
__a :List[Any] = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1]
# 5. Algebraic Sum = [µA(x) + µB(x) – (µA(x) * µB(x))]
__a :Union[str, Any] = young + middle_aged - (young * middle_aged)
# 6. Algebraic Product = (µA(x) * µB(x))
__a :Any = young * middle_aged
# 7. Bounded Sum = min[1, (µA(x) + µB(x))]
__a :Any = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
# 8. Bounded difference = max[0, (µA(x) - µB(x))]
__a :Optional[int] = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]
# max-min composition
# max-product composition
# Plot each set A, set B and each operation result using plot() and subplot().
from matplotlib import pyplot as plt
plt.figure()
plt.subplot(4, 3, 1)
plt.plot(X, young)
plt.title('Young')
plt.grid(True)
plt.subplot(4, 3, 2)
plt.plot(X, middle_aged)
plt.title('Middle aged')
plt.grid(True)
plt.subplot(4, 3, 3)
plt.plot(X, union)
plt.title('union')
plt.grid(True)
plt.subplot(4, 3, 4)
plt.plot(X, intersection)
plt.title('intersection')
plt.grid(True)
plt.subplot(4, 3, 5)
plt.plot(X, complement_a)
plt.title('complement_a')
plt.grid(True)
plt.subplot(4, 3, 6)
plt.plot(X, difference)
plt.title('difference a/b')
plt.grid(True)
plt.subplot(4, 3, 7)
plt.plot(X, alg_sum)
plt.title('alg_sum')
plt.grid(True)
plt.subplot(4, 3, 8)
plt.plot(X, alg_product)
plt.title('alg_product')
plt.grid(True)
plt.subplot(4, 3, 9)
plt.plot(X, bdd_sum)
plt.title('bdd_sum')
plt.grid(True)
plt.subplot(4, 3, 10)
plt.plot(X, bdd_difference)
plt.title('bdd_difference')
plt.grid(True)
plt.subplots_adjust(hspace=0.5)
plt.show() | 86 |
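# Editor's sketch (hedged): `fuzz.membership.trimf(X, [a, b, c])` evaluates a
# triangular membership function. A hand-rolled NumPy equivalent written from
# the definition (assumes a < b < c); names are mine.
import numpy as np

def triangular(x, abc):
    a, b, c = abc
    return np.clip(np.minimum((x - a) / (b - a), (c - x) / (c - b)), 0.0, 1.0)

assert triangular(np.array([0.0, 25.0, 50.0]), [0, 25, 50]).tolist() == [0.0, 1.0, 0.0]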
def __snake_case ( __UpperCamelCase : int = 50 ):
"""simple docstring"""
A_ = [1] * (length + 1)
for row_length in range(length + 1 ):
for tile_length in range(2 ,5 ):
for tile_start in range(row_length - tile_length + 1 ):
ways_number[row_length] += ways_number[
row_length - tile_start - tile_length
]
return ways_number[length]
if __name__ == "__main__":
print(F"{solution() = }") | 86 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__a :List[str] = logging.get_logger(__name__)
__a :List[str] = {
'microsoft/trocr-base-handwritten': (
'https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json'
),
# See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class _a ( snake_case_ ):
"""simple docstring"""
_lowerCamelCase : Tuple = 'trocr'
_lowerCamelCase : List[Any] = ['past_key_values']
_lowerCamelCase : Dict = {
'num_attention_heads': 'decoder_attention_heads',
'hidden_size': 'd_model',
'num_hidden_layers': 'decoder_layers',
}
def __init__( self : Optional[int] , UpperCAmelCase : Optional[Any]=50265 , UpperCAmelCase : Union[str, Any]=1024 , UpperCAmelCase : Any=12 , UpperCAmelCase : Optional[int]=16 , UpperCAmelCase : str=4096 , UpperCAmelCase : List[str]="gelu" , UpperCAmelCase : Dict=512 , UpperCAmelCase : str=0.1 , UpperCAmelCase : Dict=0.0 , UpperCAmelCase : Tuple=0.0 , UpperCAmelCase : List[str]=2 , UpperCAmelCase : int=0.02 , UpperCAmelCase : Tuple=0.0 , UpperCAmelCase : Dict=True , UpperCAmelCase : Union[str, Any]=False , UpperCAmelCase : Union[str, Any]=True , UpperCAmelCase : Tuple=True , UpperCAmelCase : Optional[int]=1 , UpperCAmelCase : int=0 , UpperCAmelCase : Optional[int]=2 , **UpperCAmelCase : str , ):
A_ = vocab_size
A_ = d_model
A_ = decoder_layers
A_ = decoder_attention_heads
A_ = decoder_ffn_dim
A_ = activation_function
A_ = max_position_embeddings
A_ = dropout
A_ = attention_dropout
A_ = activation_dropout
A_ = init_std
A_ = decoder_layerdrop
A_ = use_cache
A_ = scale_embedding
A_ = use_learned_position_embeddings
A_ = layernorm_embedding
super().__init__(
pad_token_id=UpperCAmelCase , bos_token_id=UpperCAmelCase , eos_token_id=UpperCAmelCase , decoder_start_token_id=UpperCAmelCase , **UpperCAmelCase , ) | 86 |
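# Editor's sketch (hedged): the `attribute_map` above aliases generic config
# names onto the TrOCR-specific ones, so framework code can keep reading
# `hidden_size`. Usage check (requires `transformers`); the values are
# arbitrary choices of mine.
from transformers import TrOCRConfig

config = TrOCRConfig(d_model=256, decoder_layers=4)
assert config.hidden_size == config.d_model == 256
assert config.num_hidden_layers == config.decoder_layers == 4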
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
__a :List[str] = logging.get_logger(__name__)
@add_end_docstrings(snake_case_ )
class _a ( snake_case_ ):
"""simple docstring"""
def __init__( self : Any , **UpperCAmelCase : List[str] ):
super().__init__(**UpperCAmelCase )
if self.framework == "tf":
raise ValueError(f'''The {self.__class__} is only available in PyTorch.''' )
requires_backends(self , "vision" )
self.check_model_type(UpperCAmelCase )
def __call__( self : Optional[int] , UpperCAmelCase : Union[str, "Image.Image", List[Dict[str, Any]]] , UpperCAmelCase : Union[str, List[str]] = None , **UpperCAmelCase : List[Any] , ):
if "text_queries" in kwargs:
A_ = kwargs.pop("text_queries" )
if isinstance(UpperCAmelCase , (str, Image.Image) ):
A_ = {"image": image, "candidate_labels": candidate_labels}
else:
A_ = image
A_ = super().__call__(UpperCAmelCase , **UpperCAmelCase )
return results
def __A ( self : int , **UpperCAmelCase : Tuple ):
A_ = {}
if "threshold" in kwargs:
A_ = kwargs["threshold"]
if "top_k" in kwargs:
A_ = kwargs["top_k"]
return {}, {}, postprocess_params
def __A ( self : List[str] , UpperCAmelCase : Dict ):
A_ = load_image(inputs["image"] )
A_ = inputs["candidate_labels"]
if isinstance(UpperCAmelCase , UpperCAmelCase ):
A_ = candidate_labels.split("," )
A_ = torch.tensor([[image.height, image.width]] , dtype=torch.intaa )
for i, candidate_label in enumerate(UpperCAmelCase ):
A_ = self.tokenizer(UpperCAmelCase , return_tensors=self.framework )
A_ = self.image_processor(UpperCAmelCase , return_tensors=self.framework )
yield {
"is_last": i == len(UpperCAmelCase ) - 1,
"target_size": target_size,
"candidate_label": candidate_label,
**text_inputs,
**image_features,
}
def __A ( self : str , UpperCAmelCase : int ):
A_ = model_inputs.pop("target_size" )
A_ = model_inputs.pop("candidate_label" )
A_ = model_inputs.pop("is_last" )
A_ = self.model(**UpperCAmelCase )
A_ = {"target_size": target_size, "candidate_label": candidate_label, "is_last": is_last, **outputs}
return model_outputs
def __A ( self : Dict , UpperCAmelCase : Any , UpperCAmelCase : Optional[int]=0.1 , UpperCAmelCase : Optional[int]=None ):
A_ = []
for model_output in model_outputs:
A_ = model_output["candidate_label"]
A_ = BaseModelOutput(UpperCAmelCase )
A_ = self.image_processor.post_process_object_detection(
outputs=UpperCAmelCase , threshold=UpperCAmelCase , target_sizes=model_output["target_size"] )[0]
for index in outputs["scores"].nonzero():
A_ = outputs["scores"][index].item()
A_ = self._get_bounding_box(outputs["boxes"][index][0] )
A_ = {"score": score, "label": label, "box": box}
results.append(UpperCAmelCase )
A_ = sorted(UpperCAmelCase , key=lambda UpperCAmelCase : x["score"] , reverse=UpperCAmelCase )
if top_k:
A_ = results[:top_k]
return results
def __A ( self : List[str] , UpperCAmelCase : "torch.Tensor" ):
if self.framework != "pt":
raise ValueError("The ZeroShotObjectDetectionPipeline is only available in PyTorch." )
A_ , A_ , A_ , A_ = box.int().tolist()
A_ = {
"xmin": xmin,
"ymin": ymin,
"xmax": xmax,
"ymax": ymax,
}
return bbox | 86 | 1 |
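# Editor's sketch (hedged): typical use of the pipeline above. The OWL-ViT
# checkpoint name is an assumption of mine; any zero-shot object detection
# checkpoint should work.
from transformers import pipeline

detector = pipeline(task="zero-shot-object-detection", model="google/owlvit-base-patch32")
predictions = detector(
    "http://images.cocodataset.org/val2017/000000039769.jpg",
    candidate_labels=["cat", "remote control"],
)
# each prediction looks like {"score": ..., "label": ..., "box": {"xmin": ..., ...}}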
from typing import List, Union
import numpy as np
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING
__a :Union[str, Any] = logging.get_logger(__name__)
@add_end_docstrings(snake_case_ )
class _a ( snake_case_ ):
"""simple docstring"""
def __init__( self : List[str] , *UpperCAmelCase : str , **UpperCAmelCase : Optional[int] ):
super().__init__(*UpperCAmelCase , **UpperCAmelCase )
requires_backends(self , "vision" )
self.check_model_type(UpperCAmelCase )
def __call__( self : List[Any] , UpperCAmelCase : Union[str, List[str], "Image.Image", List["Image.Image"]] , **UpperCAmelCase : str ):
return super().__call__(UpperCAmelCase , **UpperCAmelCase )
def __A ( self : int , **UpperCAmelCase : Optional[Any] ):
return {}, {}, {}
def __A ( self : Any , UpperCAmelCase : int ):
A_ = load_image(UpperCAmelCase )
A_ = image.size
A_ = self.image_processor(images=UpperCAmelCase , return_tensors=self.framework )
return model_inputs
def __A ( self : Tuple , UpperCAmelCase : str ):
A_ = self.model(**UpperCAmelCase )
return model_outputs
def __A ( self : List[str] , UpperCAmelCase : Optional[int] ):
A_ = model_outputs.predicted_depth
A_ = torch.nn.functional.interpolate(
predicted_depth.unsqueeze(1 ) , size=self.image_size[::-1] , mode="bicubic" , align_corners=UpperCAmelCase )
A_ = prediction.squeeze().cpu().numpy()
A_ = (output * 255 / np.max(UpperCAmelCase )).astype("uint8" )
A_ = Image.fromarray(UpperCAmelCase )
A_ = {}
A_ = predicted_depth
A_ = depth
return output_dict | 86 |
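# Editor's sketch (hedged): typical use of the depth pipeline above. The DPT
# checkpoint name is an assumption of mine.
from transformers import pipeline

estimator = pipeline(task="depth-estimation", model="Intel/dpt-large")
result = estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
result["depth"].save("depth.png")  # PIL image; result["predicted_depth"] holds the raw tensor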
import argparse
import torch
# Step 1. clone https://github.com/microsoft/unilm
# Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd
# Step 3. cd unilm
# Step 4. ln -s $(realpath wavlm/modules.py) ./ # create symlink
# import classes
from unilm.wavlm.WavLM import WavLM as WavLMOrig
from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig
from transformers import WavLMConfig, WavLMModel, logging
logging.set_verbosity_info()
__a :Any = logging.get_logger(__name__)
__a :int = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn.grep_linear': 'encoder.layers.*.attention.gru_rel_pos_linear',
'self_attn.relative_attention_bias': 'encoder.layers.*.attention.rel_attn_embed',
'self_attn.grep_a': 'encoder.layers.*.attention.gru_rel_pos_const',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'ctc_proj',
'mask_emb': 'masked_spec_embed',
}
__a :Tuple = [
'ctc_proj',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def __snake_case ( __UpperCamelCase : List[str] ,__UpperCamelCase : List[Any] ,__UpperCamelCase : Dict ,__UpperCamelCase : Any ,__UpperCamelCase : Optional[int] ):
"""simple docstring"""
for attribute in key.split("." ):
A_ = getattr(__UpperCamelCase ,__UpperCamelCase )
if weight_type is not None:
A_ = getattr(__UpperCamelCase ,__UpperCamelCase ).shape
else:
A_ = hf_pointer.shape
assert hf_shape == value.shape, (
f'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
f''' {value.shape} for {full_name}'''
)
if weight_type == "weight":
A_ = value
elif weight_type == "weight_g":
A_ = value
elif weight_type == "weight_v":
A_ = value
elif weight_type == "bias":
A_ = value
else:
A_ = value
logger.info(f'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''' )
def __snake_case ( __UpperCamelCase : Optional[int] ,__UpperCamelCase : Optional[Any] ):
"""simple docstring"""
A_ = []
A_ = fairseq_model.state_dict()
A_ = hf_model.feature_extractor
for name, value in fairseq_dict.items():
A_ = False
if "conv_layers" in name:
load_conv_layer(
__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,hf_model.config.feat_extract_norm == "group" ,)
A_ = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
A_ = True
if "*" in mapped_key:
A_ = name.split(__UpperCamelCase )[0].split("." )[-2]
A_ = mapped_key.replace("*" ,__UpperCamelCase )
if "weight_g" in name:
A_ = "weight_g"
elif "weight_v" in name:
A_ = "weight_v"
elif "bias" in name and "relative_attention_bias" not in name:
A_ = "bias"
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
A_ = "weight"
else:
A_ = None
set_recursively(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase )
continue
if not is_used:
unused_weights.append(__UpperCamelCase )
logger.warning(f'''Unused weights: {unused_weights}''' )
def __snake_case ( __UpperCamelCase : Optional[int] ,__UpperCamelCase : Dict ,__UpperCamelCase : List[str] ,__UpperCamelCase : Union[str, Any] ,__UpperCamelCase : Optional[int] ):
"""simple docstring"""
A_ = full_name.split("conv_layers." )[-1]
A_ = name.split("." )
A_ = int(items[0] )
A_ = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
)
A_ = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
)
A_ = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
f'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'''
" found."
)
A_ = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'''
)
A_ = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(__UpperCamelCase )
@torch.no_grad()
def __snake_case ( __UpperCamelCase : Dict ,__UpperCamelCase : str ,__UpperCamelCase : int=None ):
"""simple docstring"""
A_ = torch.load(__UpperCamelCase )
A_ = WavLMConfigOrig(checkpoint["cfg"] )
A_ = WavLMOrig(__UpperCamelCase )
model.load_state_dict(checkpoint["model"] )
model.eval()
if config_path is not None:
A_ = WavLMConfig.from_pretrained(__UpperCamelCase )
else:
A_ = WavLMConfig()
A_ = WavLMModel(__UpperCamelCase )
recursively_load_weights(__UpperCamelCase ,__UpperCamelCase )
hf_wavlm.save_pretrained(__UpperCamelCase )
if __name__ == "__main__":
__a :List[Any] = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
__a :Optional[int] = parser.parse_args()
convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path) | 86 | 1 |
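# Editor's sketch (hedged): how the "encoder.layers.*" wildcards in MAPPING
# above get resolved - the layer index from the fairseq key is spliced into
# the "*" slot of the HF key template. The function name is mine.
def rename(name: str, key: str, template: str) -> str:
    layer_index = name.split(key)[0].split(".")[-2]
    return template.replace("*", layer_index)

assert (
    rename("encoder.layers.3.fc1.weight", "fc1", "encoder.layers.*.feed_forward.intermediate_dense")
    == "encoder.layers.3.feed_forward.intermediate_dense"
)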
from PIL import Image
def __snake_case ( __UpperCamelCase : Image ,__UpperCamelCase : float ):
"""simple docstring"""
def brightness(__UpperCamelCase : int ) -> float:
return 128 + level + (c - 128)
if not -255.0 <= level <= 255.0:
raise ValueError("level must be between -255.0 (black) and 255.0 (white)" )
return img.point(__UpperCamelCase )
if __name__ == "__main__":
# Load image
with Image.open('image_data/lena.jpg') as img:
# Change brightness to 100
__a :List[str] = change_brightness(img, 100)
bright_img.save('image_data/lena_brightness.png', format='png') | 86 |
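# Editor's note (hedged): the mapping above reduces to c + level (the two
# 128s cancel), and Image.point applies it per channel value through a
# lookup table. A self-contained check; the sample values are mine.
from PIL import Image

img = Image.new("RGB", (2, 2), color=(10, 128, 250))
brighter = img.point(lambda c: c + 100)
assert brighter.getpixel((0, 0))[:2] == (110, 228)
# the 250 channel saturates at 255 rather than wrapping around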
def __snake_case ( __UpperCamelCase : list ,__UpperCamelCase : int = 0 ):
"""simple docstring"""
A_ = length or len(__UpperCamelCase )
A_ = False
for i in range(length - 1 ):
if list_data[i] > list_data[i + 1]:
A_ , A_ = list_data[i + 1], list_data[i]
A_ = True
return list_data if not swapped else bubble_sort(__UpperCamelCase ,length - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod() | 86 | 1 |
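# Editor's sketch (hedged): an iterative equivalent of the recursive bubble
# sort above, keeping the same early exit when a pass performs no swaps.
def bubble_sort_iterative(data: list) -> list:
    n = len(data)
    while n > 1:
        swapped = False
        for i in range(n - 1):
            if data[i] > data[i + 1]:
                data[i], data[i + 1] = data[i + 1], data[i]
                swapped = True
        if not swapped:
            break
        n -= 1
    return data

assert bubble_sort_iterative([5, 1, 4, 2, 8]) == [1, 2, 4, 5, 8]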
def __snake_case ( __UpperCamelCase : int = 50 ):
"""simple docstring"""
A_ = [1] * (length + 1)
for row_length in range(length + 1 ):
for tile_length in range(2 ,5 ):
for tile_start in range(row_length - tile_length + 1 ):
ways_number[row_length] += ways_number[
row_length - tile_start - tile_length
]
return ways_number[length]
if __name__ == "__main__":
print(F"{solution() = }") | 86 |
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class _a ( unittest.TestCase ):
"""simple docstring"""
def __A ( self : List[str] ):
A_ = torch.nn.Linear(10 , 10 )
A_ = torch.optim.SGD(model.parameters() , 0.1 )
A_ = Accelerator()
A_ = accelerator.prepare(UpperCAmelCase )
try:
pickle.loads(pickle.dumps(UpperCAmelCase ) )
except Exception as e:
self.fail(f'''Accelerated optimizer pickling failed with {e}''' )
AcceleratorState._reset_state() | 86 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__a :Any = {
'configuration_pegasus_x': ['PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PegasusXConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a :List[str] = [
'PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST',
'PegasusXForConditionalGeneration',
'PegasusXModel',
'PegasusXPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pegasus_x import (
PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
PegasusXForConditionalGeneration,
PegasusXModel,
PegasusXPreTrainedModel,
)
else:
import sys
__a :Union[str, Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 86 |
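# Editor's sketch (hedged): the core idea behind _LazyModule, expressed with
# the module-level __getattr__ hook from PEP 562. Placed in a package's
# __init__.py, the torch-backed module is imported only on first attribute
# access. Names here are mine and simplified.
import importlib

_LAZY_ATTRS = {"PegasusXModel": ".modeling_pegasus_x"}

def __getattr__(name):
    if name in _LAZY_ATTRS:
        module = importlib.import_module(_LAZY_ATTRS[name], package=__name__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")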
import argparse
import json
import os
import fairseq
import torch
from torch import nn
from transformers import (
SpeechaTextaConfig,
SpeechaTextaForCausalLM,
SpeechaTextaTokenizer,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
__a :List[str] = logging.get_logger(__name__)
__a :Optional[int] = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
}
__a :Any = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def __snake_case ( __UpperCamelCase : Dict ,__UpperCamelCase : Optional[Any] ,__UpperCamelCase : Optional[int] ,__UpperCamelCase : List[str] ,__UpperCamelCase : Union[str, Any] ):
"""simple docstring"""
for attribute in key.split("." ):
A_ = getattr(__UpperCamelCase ,__UpperCamelCase )
if weight_type is not None:
A_ = getattr(__UpperCamelCase ,__UpperCamelCase ).shape
else:
A_ = hf_pointer.shape
assert hf_shape == value.shape, (
f'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
f''' {value.shape} for {full_name}'''
)
if weight_type == "weight":
A_ = value
elif weight_type == "weight_g":
A_ = value
elif weight_type == "weight_v":
A_ = value
elif weight_type == "bias":
A_ = value
else:
A_ = value
logger.info(f'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''' )
def __snake_case ( __UpperCamelCase : Union[str, Any] ,__UpperCamelCase : Dict ):
"""simple docstring"""
A_ = []
A_ = fairseq_model.state_dict()
A_ = hf_model.feature_extractor
# if encoder has different dim to decoder -> use proj_weight
A_ = None
for name, value in fairseq_dict.items():
A_ = False
if "conv_layers" in name:
load_conv_layer(
__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,hf_model.config.feat_extract_norm == "group" ,)
A_ = True
elif name.split("." )[0] == "proj":
A_ = fairseq_model.proj
A_ = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
A_ = True
if "*" in mapped_key:
A_ = name.split(__UpperCamelCase )[0].split("." )[-2]
A_ = mapped_key.replace("*" ,__UpperCamelCase )
if "weight_g" in name:
A_ = "weight_g"
elif "weight_v" in name:
A_ = "weight_v"
elif "bias" in name:
A_ = "bias"
elif "weight" in name:
A_ = "weight"
else:
A_ = None
set_recursively(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase )
continue
if not is_used:
unused_weights.append(__UpperCamelCase )
logger.warning(f'''Unused weights: {unused_weights}''' )
return proj_weight
def __snake_case ( __UpperCamelCase : str ,__UpperCamelCase : Optional[Any] ,__UpperCamelCase : int ,__UpperCamelCase : int ,__UpperCamelCase : Any ):
"""simple docstring"""
A_ = full_name.split("conv_layers." )[-1]
A_ = name.split("." )
A_ = int(items[0] )
A_ = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
)
A_ = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
)
A_ = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
f'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'''
" found."
)
A_ = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'''
)
A_ = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(__UpperCamelCase )
def __snake_case ( __UpperCamelCase : Optional[Any] ):
"""simple docstring"""
A_ , A_ = emb.weight.shape
A_ = nn.Linear(__UpperCamelCase ,__UpperCamelCase ,bias=__UpperCamelCase )
A_ = emb.weight.data
return lin_layer
def __snake_case ( __UpperCamelCase : Tuple ):
"""simple docstring"""
with open(__UpperCamelCase ,"r" ,encoding="utf-8" ) as f:
A_ = f.readlines()
A_ = [line.split(" " )[0] for line in lines]
A_ = len(__UpperCamelCase )
A_ = {
"<s>": 0,
"<pad>": 1,
"</s>": 2,
"<unk>": 3,
}
vocab_dict.update(dict(zip(__UpperCamelCase ,range(4 ,num_words + 4 ) ) ) )
return vocab_dict
@torch.no_grad()
def __snake_case ( __UpperCamelCase : List[str] ,__UpperCamelCase : Any ,__UpperCamelCase : List[Any] ,__UpperCamelCase : Union[str, Any] ,__UpperCamelCase : List[str] ,__UpperCamelCase : Optional[Any] ,__UpperCamelCase : Dict ,):
"""simple docstring"""
A_ = WavaVecaConfig.from_pretrained(__UpperCamelCase )
A_ = SpeechaTextaConfig.from_pretrained(
__UpperCamelCase ,vocab_size=__UpperCamelCase ,decoder_layers=__UpperCamelCase ,do_stable_layer_norm=__UpperCamelCase )
A_ = WavaVecaFeatureExtractor(
feature_size=1 ,sampling_rate=1_6000 ,padding_value=0 ,do_normalize=__UpperCamelCase ,return_attention_mask=__UpperCamelCase ,)
A_ , A_ , A_ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] ,arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} )
A_ = model[0].eval()
# set weights for wav2vec2 encoder
A_ = WavaVecaModel(__UpperCamelCase )
A_ = recursively_load_weights_wavaveca(model.encoder ,__UpperCamelCase )
A_ = SpeechaTextaForCausalLM(__UpperCamelCase )
A_ , A_ = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() ,strict=__UpperCamelCase )
# set output linear layer
unexpected_keys.remove("embed_out" )
A_ = nn.Parameter(model.decoder.embed_out.detach() )
# layer norm is initialized to the identity matrix, so leaving it as-is is fine
logger.warning(f'''The following keys are missing when loading the decoder weights: {missing_keys}''' )
logger.warning(f'''The following keys are unexpected when loading the decoder weights: {unexpected_keys}''' )
A_ = SpeechEncoderDecoderModel(encoder=__UpperCamelCase ,decoder=__UpperCamelCase )
A_ = False
# add projection layer
A_ = nn.Parameter(projection_layer.weight )
A_ = nn.Parameter(projection_layer.bias )
A_ = create_vocab_dict(__UpperCamelCase )
with open(os.path.join(__UpperCamelCase ,"vocab.json" ) ,"w" ) as fp:
json.dump(__UpperCamelCase ,__UpperCamelCase )
A_ = SpeechaTextaTokenizer(os.path.join(__UpperCamelCase ,"vocab.json" ) )
tokenizer.save_pretrained(__UpperCamelCase )
A_ = hf_wavavec.config.to_dict()
A_ = tokenizer.pad_token_id
A_ = tokenizer.bos_token_id
A_ = tokenizer.eos_token_id
A_ = "speech_to_text_2"
A_ = "wav2vec2"
A_ = SpeechEncoderDecoderConfig.from_dict(__UpperCamelCase )
hf_wavavec.save_pretrained(__UpperCamelCase )
feature_extractor.save_pretrained(__UpperCamelCase )
if __name__ == "__main__":
__a :int = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument(
'--encoder_config_path',
default='facebook/wav2vec2-large-lv60',
type=str,
help='Path to hf encoder wav2vec2 checkpoint config',
)
parser.add_argument(
'--decoder_config_path',
default='facebook/s2t-small-mustc-en-fr-st',
type=str,
help='Path to hf decoder s2t checkpoint config',
)
parser.add_argument('--vocab_size', default=1_0224, type=int, help='Vocab size of decoder')
parser.add_argument('--num_decoder_layers', default=7, type=int, help='Number of decoder layers')
__a :Tuple = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
vocab_size=args.vocab_size,
num_decoder_layers=args.num_decoder_layers,
) | 86 | 1 |
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import DiffusionPipeline
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import logging
__a :Optional[Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
class _a ( snake_case_ ):
"""simple docstring"""
def __init__( self : str , UpperCAmelCase : AutoencoderKL , UpperCAmelCase : CLIPTextModel , UpperCAmelCase : CLIPTokenizer , UpperCAmelCase : UNetaDConditionModel , UpperCAmelCase : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , UpperCAmelCase : StableDiffusionSafetyChecker , UpperCAmelCase : CLIPImageProcessor , ):
super().__init__()
self.register_modules(
vae=UpperCAmelCase , text_encoder=UpperCAmelCase , tokenizer=UpperCAmelCase , unet=UpperCAmelCase , scheduler=UpperCAmelCase , safety_checker=UpperCAmelCase , feature_extractor=UpperCAmelCase , )
def __A ( self : List[Any] , UpperCAmelCase : Optional[Union[str, int]] = "auto" ):
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
A_ = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(UpperCAmelCase )
def __A ( self : Any ):
self.enable_attention_slicing(UpperCAmelCase )
@torch.no_grad()
def __call__( self : str , UpperCAmelCase : Union[str, List[str]] , UpperCAmelCase : int = 512 , UpperCAmelCase : int = 512 , UpperCAmelCase : int = 50 , UpperCAmelCase : float = 7.5 , UpperCAmelCase : Optional[Union[str, List[str]]] = None , UpperCAmelCase : Optional[int] = 1 , UpperCAmelCase : float = 0.0 , UpperCAmelCase : Optional[torch.Generator] = None , UpperCAmelCase : Optional[torch.FloatTensor] = None , UpperCAmelCase : Optional[str] = "pil" , UpperCAmelCase : bool = True , UpperCAmelCase : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , UpperCAmelCase : int = 1 , UpperCAmelCase : Optional[torch.FloatTensor] = None , **UpperCAmelCase : Tuple , ):
if isinstance(UpperCAmelCase , UpperCAmelCase ):
A_ = 1
elif isinstance(UpperCAmelCase , UpperCAmelCase ):
A_ = len(UpperCAmelCase )
else:
raise ValueError(f'''`prompt` has to be of type `str` or `list` but is {type(UpperCAmelCase )}''' )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f'''`height` and `width` have to be divisible by 8 but are {height} and {width}.''' )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(UpperCAmelCase , UpperCAmelCase ) or callback_steps <= 0)
):
raise ValueError(
f'''`callback_steps` has to be a positive integer but is {callback_steps} of type'''
f''' {type(UpperCAmelCase )}.''' )
# get prompt text embeddings
A_ = self.tokenizer(
UpperCAmelCase , padding="max_length" , max_length=self.tokenizer.model_max_length , return_tensors="pt" , )
A_ = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
A_ = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
"The following part of your input was truncated because CLIP can only handle sequences up to"
f''' {self.tokenizer.model_max_length} tokens: {removed_text}''' )
A_ = text_input_ids[:, : self.tokenizer.model_max_length]
if text_embeddings is None:
A_ = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
A_ , A_ , A_ = text_embeddings.shape
A_ = text_embeddings.repeat(1 , UpperCAmelCase , 1 )
A_ = text_embeddings.view(bs_embed * num_images_per_prompt , UpperCAmelCase , -1 )
# here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier-free guidance.
A_ = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
A_ = 42
if negative_prompt is None:
A_ = [""]
elif type(UpperCAmelCase ) is not type(UpperCAmelCase ):
raise TypeError(
f'''`negative_prompt` should be the same type to `prompt`, but got {type(UpperCAmelCase )} !='''
f''' {type(UpperCAmelCase )}.''' )
elif isinstance(UpperCAmelCase , UpperCAmelCase ):
A_ = [negative_prompt]
elif batch_size != len(UpperCAmelCase ):
raise ValueError(
f'''`negative_prompt`: {negative_prompt} has batch size {len(UpperCAmelCase )}, but `prompt`:'''
f''' {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches'''
" the batch size of `prompt`." )
else:
A_ = negative_prompt
A_ = text_input_ids.shape[-1]
A_ = self.tokenizer(
UpperCAmelCase , padding="max_length" , max_length=UpperCAmelCase , truncation=UpperCAmelCase , return_tensors="pt" , )
A_ = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
A_ = uncond_embeddings.shape[1]
A_ = uncond_embeddings.repeat(UpperCAmelCase , UpperCAmelCase , 1 )
A_ = uncond_embeddings.view(batch_size * num_images_per_prompt , UpperCAmelCase , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
A_ = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
A_ = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
A_ = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64)
A_ = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
A_ = torch.randn(
UpperCAmelCase , generator=UpperCAmelCase , device="cpu" , dtype=UpperCAmelCase ).to(self.device )
A_ = torch.randn(UpperCAmelCase , generator=UpperCAmelCase , device="cpu" , dtype=UpperCAmelCase ).to(
self.device )
else:
A_ = torch.randn(
UpperCAmelCase , generator=UpperCAmelCase , device=self.device , dtype=UpperCAmelCase )
A_ = torch.randn(UpperCAmelCase , generator=UpperCAmelCase , device=self.device , dtype=UpperCAmelCase )
else:
if latents_reference.shape != latents_shape:
raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' )
A_ = latents_reference.to(self.device )
A_ = latents.to(self.device )
# This is the key part of the pipeline where we
# try to ensure that the generated images w/ the same seed
# but different sizes actually result in similar images
A_ = (latents_shape[3] - latents_shape_reference[3]) // 2
A_ = (latents_shape[2] - latents_shape_reference[2]) // 2
A_ = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx
A_ = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy
A_ = 0 if dx < 0 else dx
A_ = 0 if dy < 0 else dy
A_ = max(-dx , 0 )
A_ = max(-dy , 0 )
A_ = latents_reference[:, :, dy : dy + h, dx : dx + w]
# set timesteps
self.scheduler.set_timesteps(UpperCAmelCase )
# Some schedulers like PNDM have timesteps as arrays
# It's more efficient to move all timesteps to the correct device beforehand
A_ = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
A_ = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
A_ = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
A_ = {}
if accepts_eta:
A_ = eta
for i, t in enumerate(self.progress_bar(UpperCAmelCase ) ):
# expand the latents if we are doing classifier free guidance
A_ = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
A_ = self.scheduler.scale_model_input(UpperCAmelCase , UpperCAmelCase )
# predict the noise residual
A_ = self.unet(UpperCAmelCase , UpperCAmelCase , encoder_hidden_states=UpperCAmelCase ).sample
# perform guidance
if do_classifier_free_guidance:
A_ , A_ = noise_pred.chunk(2 )
A_ = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
A_ = self.scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
A_ = 1 / 0.18_215 * latents
A_ = self.vae.decode(UpperCAmelCase ).sample
A_ = (image / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
A_ = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if self.safety_checker is not None:
A_ = self.feature_extractor(self.numpy_to_pil(UpperCAmelCase ) , return_tensors="pt" ).to(
self.device )
A_ , A_ = self.safety_checker(
images=UpperCAmelCase , clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype ) )
else:
A_ = None
if output_type == "pil":
A_ = self.numpy_to_pil(UpperCAmelCase )
if not return_dict:
return (image, has_nsfw_concept)
return StableDiffusionPipelineOutput(images=UpperCAmelCase , nsfw_content_detected=UpperCAmelCase ) | 86 |
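# Editor's sketch (hedged): the guidance step above follows the standard
# classifier-free guidance formula eps = eps_uncond + s * (eps_text - eps_uncond);
# s = 1 reduces to the conditional prediction. Toy numbers, my own code:
import torch

eps_uncond, eps_text = torch.zeros(4), torch.ones(4)
guidance_scale = 7.5
eps = eps_uncond + guidance_scale * (eps_text - eps_uncond)
assert torch.allclose(eps, torch.full((4,), 7.5))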
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
__a :str = logging.get_logger(__name__)
__a :Any = Dict[str, Any]
__a :int = List[Prediction]
@add_end_docstrings(snake_case_ )
class _a ( snake_case_ ):
"""simple docstring"""
def __init__( self : Tuple , *UpperCAmelCase : Optional[int] , **UpperCAmelCase : Optional[Any] ):
super().__init__(*UpperCAmelCase , **UpperCAmelCase )
if self.framework == "tf":
raise ValueError(f'''The {self.__class__} is only available in PyTorch.''' )
requires_backends(self , "vision" )
self.check_model_type(
dict(MODEL_FOR_OBJECT_DETECTION_MAPPING.items() + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items() ) )
def __A ( self : str , **UpperCAmelCase : str ):
A_ = {}
if "threshold" in kwargs:
A_ = kwargs["threshold"]
return {}, {}, postprocess_kwargs
def __call__( self : Union[str, Any] , *UpperCAmelCase : Union[str, Any] , **UpperCAmelCase : Optional[Any] ):
return super().__call__(*UpperCAmelCase , **UpperCAmelCase )
def __A ( self : str , UpperCAmelCase : Any ):
A_ = load_image(UpperCAmelCase )
A_ = torch.IntTensor([[image.height, image.width]] )
A_ = self.image_processor(images=[image] , return_tensors="pt" )
if self.tokenizer is not None:
A_ = self.tokenizer(text=inputs["words"] , boxes=inputs["boxes"] , return_tensors="pt" )
A_ = target_size
return inputs
def __A ( self : Optional[Any] , UpperCAmelCase : Optional[int] ):
A_ = model_inputs.pop("target_size" )
A_ = self.model(**UpperCAmelCase )
A_ = outputs.__class__({"target_size": target_size, **outputs} )
if self.tokenizer is not None:
A_ = model_inputs["bbox"]
return model_outputs
def __A ( self : str , UpperCAmelCase : Optional[int] , UpperCAmelCase : Union[str, Any]=0.9 ):
A_ = model_outputs["target_size"]
if self.tokenizer is not None:
# This is a LayoutLMForTokenClassification variant.
# The OCR got the boxes and the model classified the words.
A_ , A_ = target_size[0].tolist()
def unnormalize(UpperCAmelCase : Any ):
return self._get_bounding_box(
torch.Tensor(
[
(width * bbox[0] / 1000),
(height * bbox[1] / 1000),
(width * bbox[2] / 1000),
(height * bbox[3] / 1000),
] ) )
A_ , A_ = model_outputs["logits"].squeeze(0 ).softmax(dim=-1 ).max(dim=-1 )
A_ = [self.model.config.idalabel[prediction] for prediction in classes.tolist()]
A_ = [unnormalize(UpperCAmelCase ) for bbox in model_outputs["bbox"].squeeze(0 )]
A_ = ["score", "label", "box"]
A_ = [dict(zip(UpperCAmelCase , UpperCAmelCase ) ) for vals in zip(scores.tolist() , UpperCAmelCase , UpperCAmelCase ) if vals[0] > threshold]
else:
# This is a regular ForObjectDetectionModel
A_ = self.image_processor.post_process_object_detection(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
A_ = raw_annotations[0]
A_ = raw_annotation["scores"]
A_ = raw_annotation["labels"]
A_ = raw_annotation["boxes"]
A_ = scores.tolist()
A_ = [self.model.config.idalabel[label.item()] for label in labels]
A_ = [self._get_bounding_box(UpperCAmelCase ) for box in boxes]
# {"scores": [...], ...} --> [{"score":x, ...}, ...]
A_ = ["score", "label", "box"]
A_ = [
dict(zip(UpperCAmelCase , UpperCAmelCase ) )
for vals in zip(raw_annotation["scores"] , raw_annotation["labels"] , raw_annotation["boxes"] )
]
return annotation
def __A ( self : Tuple , UpperCAmelCase : "torch.Tensor" ):
if self.framework != "pt":
raise ValueError("The ObjectDetectionPipeline is only available in PyTorch." )
A_ , A_ , A_ , A_ = box.int().tolist()
A_ = {
"xmin": xmin,
"ymin": ymin,
"xmax": xmax,
"ymax": ymax,
}
return bbox | 86 | 1 |
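# Editor's sketch (hedged): typical use of the pipeline above. The DETR
# checkpoint name is an assumption of mine.
from transformers import pipeline

detector = pipeline(task="object-detection", model="facebook/detr-resnet-50")
outputs = detector("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=0.9)
# -> [{"score": ..., "label": "cat", "box": {"xmin": ..., ...}}, ...]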
import fire
from transformers import AutoConfig, AutoModelForSeqaSeqLM, AutoTokenizer
def __snake_case ( __UpperCamelCase : str ,__UpperCamelCase : str ,**__UpperCamelCase : str ):
"""simple docstring"""
A_ = AutoConfig.from_pretrained(__UpperCamelCase ,**__UpperCamelCase )
A_ = AutoModelForSeqaSeqLM.from_config(__UpperCamelCase )
model.save_pretrained(__UpperCamelCase )
AutoTokenizer.from_pretrained(__UpperCamelCase ).save_pretrained(__UpperCamelCase )
return model
if __name__ == "__main__":
fire.Fire(save_randomly_initialized_version) | 86 |
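# Editor's sketch (hedged): fire.Fire maps CLI arguments onto function
# parameters, so the script above can be invoked roughly as
#   python save_script.py t5-small ./t5-random --d_model=64
# (script name and flags are illustrative). A self-contained example:
import fire

def greet(name: str, punctuation: str = "!") -> str:
    return f"Hello {name}{punctuation}"

if __name__ == "__main__":
    fire.Fire(greet)  # e.g. `python this_script.py world --punctuation=?`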
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNetaDModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def __snake_case ( __UpperCamelCase : Dict ):
"""simple docstring"""
A_ , A_ = image.size
A_ , A_ = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32
A_ = image.resize((w, h) ,resample=PIL_INTERPOLATION["lanczos"] )
A_ = np.array(__UpperCamelCase ).astype(np.floataa ) / 255.0
A_ = image[None].transpose(0 ,3 ,1 ,2 )
A_ = torch.from_numpy(__UpperCamelCase )
return 2.0 * image - 1.0
class LDMSuperResolutionPipeline(DiffusionPipeline):
    """Latent-diffusion super-resolution: a UNet denoises latents that a VQ-VAE then decodes."""

    def __init__(
        self,
        vqvae: VQModel,
        unet: UNet2DModel,
        scheduler: Union[
            DDIMScheduler,
            PNDMScheduler,
            LMSDiscreteScheduler,
            EulerDiscreteScheduler,
            EulerAncestralDiscreteScheduler,
            DPMSolverMultistepScheduler,
        ],
    ):
        super().__init__()
        self.register_modules(vqvae=vqvae, unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        image: Union[torch.Tensor, PIL.Image.Image] = None,
        batch_size: Optional[int] = 1,
        num_inference_steps: Optional[int] = 100,
        eta: Optional[float] = 0.0,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        if isinstance(image, PIL.Image.Image):
            batch_size = 1
        elif isinstance(image, torch.Tensor):
            batch_size = image.shape[0]
        else:
            raise ValueError(f"`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(image)}")

        if isinstance(image, PIL.Image.Image):
            image = preprocess(image)

        height, width = image.shape[-2:]

        # in_channels should be 6: 3 for latents, 3 for low resolution image
        latents_shape = (batch_size, self.unet.config.in_channels // 2, height, width)
        latents_dtype = next(self.unet.parameters()).dtype
        latents = randn_tensor(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
        image = image.to(device=self.device, dtype=latents_dtype)

        # set timesteps and move to the correct device
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)
        timesteps_tensor = self.scheduler.timesteps

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs["eta"] = eta

        for t in self.progress_bar(timesteps_tensor):
            # concat latents and low resolution image in the channel dimension.
            latents_input = torch.cat([latents, image], dim=1)
            latents_input = self.scheduler.scale_model_input(latents_input, t)
            # predict the noise residual
            noise_pred = self.unet(latents_input, t).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_kwargs).prev_sample

        # decode the image latents with the VQVAE
        image = self.vqvae.decode(latents).sample
        image = torch.clamp(image, -1.0, 1.0)
        image = image / 2 + 0.5
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
| 86 | 1 |
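A short usage sketch for the pipeline class above, a minimal sketch assuming the public CompVis/ldm-super-resolution-4x-openimages checkpoint and local file names (any checkpoint with a compatible vqvae/unet/scheduler trio would do):

from PIL import Image
from diffusers import LDMSuperResolutionPipeline

# Assumed checkpoint and file names -- adjust to your setup.
pipe = LDMSuperResolutionPipeline.from_pretrained("CompVis/ldm-super-resolution-4x-openimages")
low_res = Image.open("low_res.png").convert("RGB").resize((128, 128))
upscaled = pipe(low_res, num_inference_steps=100, eta=1.0).images[0]
upscaled.save("upscaled.png")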
| 86 |
DIGITS_SQUARED = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(100_000)]


def next_number(number: int) -> int:
    """Return the next chain member: the sum of the squares of number's digits."""
    sum_of_digits_squared = 0
    while number:
        # Increased speed slightly by checking every 5 digits together.
        sum_of_digits_squared += DIGITS_SQUARED[number % 100_000]
        number //= 100_000
    return sum_of_digits_squared


# There are 2 chains made:
# one ends with 89, and its member 58 is declared first so that
# the least number of iterations is needed to check all the members;
# the other ends with 1 and has only the one element 1.
# So 58 and 1 are seeded into the cache at the start.
# Changed dictionary to an array to quicken the solution.
CHAINS: list[bool | None] = [None] * 10_000_000
CHAINS[0] = True  # the chain of 1 ends with 1
CHAINS[57] = False  # the chain of 58 ends with 89


def chain(number: int) -> bool:
    """Return True if number's chain ends with 1, False if it ends with 89."""
    if CHAINS[number - 1] is not None:
        return CHAINS[number - 1]  # type: ignore

    number_chain = chain(next_number(number))
    CHAINS[number - 1] = number_chain

    # Appending a 0 digit leaves the digit-square sum unchanged,
    # so the result for number * 10^k can be cached for free.
    while number < 10_000_000:
        CHAINS[number - 1] = number_chain
        number *= 10

    return number_chain


def solution(number: int = 10_000_000) -> int:
    """Count how many starting numbers below `number` produce a chain ending with 89."""
    for i in range(1, number):
        if CHAINS[i] is None:
            chain(i + 1)
    return CHAINS[:number].count(False)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(f"{solution() = }")
| 86 | 1 |