import json
import os
from typing import Optional

import numpy as np

from ...feature_extraction_utils import BatchFeature
from ...processing_utils import ProcessorMixin
from ...utils import logging
from ...utils.hub import get_file_from_repo
from ..auto import AutoTokenizer


logger = logging.get_logger(__name__)


class BarkProcessor(ProcessorMixin):
    tokenizer_class = "AutoTokenizer"
    attributes = ["tokenizer"]

    preset_shape = {
        "semantic_prompt": 1,
        "coarse_prompt": 2,
        "fine_prompt": 2,
    }

    def __init__(self, tokenizer, speaker_embeddings: Optional[dict] = None):
        super().__init__(tokenizer)
        self.speaker_embeddings = speaker_embeddings

    @classmethod
    def from_pretrained(
        cls, pretrained_processor_name_or_path, speaker_embeddings_dict_path="speaker_embeddings_path.json", **kwargs
    ):
        if speaker_embeddings_dict_path is not None:
            speaker_embeddings_path = get_file_from_repo(
                pretrained_processor_name_or_path,
                speaker_embeddings_dict_path,
                subfolder=kwargs.pop("subfolder", None),
                cache_dir=kwargs.pop("cache_dir", None),
                force_download=kwargs.pop("force_download", False),
                proxies=kwargs.pop("proxies", None),
                resume_download=kwargs.pop("resume_download", False),
                local_files_only=kwargs.pop("local_files_only", False),
                use_auth_token=kwargs.pop("use_auth_token", None),
                revision=kwargs.pop("revision", None),
            )
            if speaker_embeddings_path is None:
                logger.warning(
                    f"`{os.path.join(pretrained_processor_name_or_path, speaker_embeddings_dict_path)}` does not exist,"
                    " no preloaded speaker embeddings will be used - Make sure to provide a correct path to the json"
                    " dictionary if wanted, otherwise set `speaker_embeddings_dict_path=None`."
                )
                speaker_embeddings = None
            else:
                with open(speaker_embeddings_path) as speaker_embeddings_json:
                    speaker_embeddings = json.load(speaker_embeddings_json)
        else:
            speaker_embeddings = None

        tokenizer = AutoTokenizer.from_pretrained(pretrained_processor_name_or_path, **kwargs)

        return cls(tokenizer=tokenizer, speaker_embeddings=speaker_embeddings)

    def save_pretrained(
        self,
        save_directory,
        speaker_embeddings_dict_path="speaker_embeddings_path.json",
        speaker_embeddings_directory="speaker_embeddings",
        push_to_hub=False,
        **kwargs,
    ):
        if self.speaker_embeddings is not None:
            os.makedirs(os.path.join(save_directory, speaker_embeddings_directory, "v2"), exist_ok=True)

            embeddings_dict = {}
            embeddings_dict["repo_or_path"] = save_directory

            for prompt_key in self.speaker_embeddings:
                if prompt_key != "repo_or_path":
                    voice_preset = self._load_voice_preset(prompt_key)

                    tmp_dict = {}
                    for key in self.speaker_embeddings[prompt_key]:
                        np.save(
                            os.path.join(
                                embeddings_dict["repo_or_path"], speaker_embeddings_directory, f"{prompt_key}_{key}"
                            ),
                            voice_preset[key],
                            allow_pickle=False,
                        )
                        tmp_dict[key] = os.path.join(speaker_embeddings_directory, f"{prompt_key}_{key}.npy")

                    embeddings_dict[prompt_key] = tmp_dict

            with open(os.path.join(save_directory, speaker_embeddings_dict_path), "w") as fp:
                json.dump(embeddings_dict, fp)

        super().save_pretrained(save_directory, push_to_hub, **kwargs)

    def _load_voice_preset(self, voice_preset=None, **kwargs):
        voice_preset_paths = self.speaker_embeddings[voice_preset]

        voice_preset_dict = {}
        for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
            if key not in voice_preset_paths:
                raise ValueError(
                    f"Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}]."
                )

            path = get_file_from_repo(
                self.speaker_embeddings.get("repo_or_path", "/"),
                voice_preset_paths[key],
                subfolder=kwargs.pop("subfolder", None),
                cache_dir=kwargs.pop("cache_dir", None),
                force_download=kwargs.pop("force_download", False),
                proxies=kwargs.pop("proxies", None),
                resume_download=kwargs.pop("resume_download", False),
                local_files_only=kwargs.pop("local_files_only", False),
                use_auth_token=kwargs.pop("use_auth_token", None),
                revision=kwargs.pop("revision", None),
            )
            if path is None:
                raise ValueError(
                    f"`{os.path.join(self.speaker_embeddings.get('repo_or_path', '/'), voice_preset_paths[key])}` does"
                    f" not exist, no preloaded voice preset will be used - Make sure to provide correct paths to the"
                    f" {voice_preset} embeddings."
                )

            voice_preset_dict[key] = np.load(path)

        return voice_preset_dict

    def _validate_voice_preset_dict(self, voice_preset=None, **kwargs):
        for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
            if key not in voice_preset:
                raise ValueError(f"Voice preset unrecognized, missing {key} as a key.")

            if not isinstance(voice_preset[key], np.ndarray):
                raise ValueError(f"{key} voice preset must be a {str(self.preset_shape[key])}D ndarray.")

            if len(voice_preset[key].shape) != self.preset_shape[key]:
                raise ValueError(f"{key} voice preset must be a {str(self.preset_shape[key])}D ndarray.")

    def __call__(
        self,
        text=None,
        voice_preset=None,
        return_tensors="pt",
        max_length=256,
        add_special_tokens=False,
        return_attention_mask=True,
        return_token_type_ids=False,
        **kwargs,
    ):
        if voice_preset is not None and not isinstance(voice_preset, dict):
            if (
                isinstance(voice_preset, str)
                and self.speaker_embeddings is not None
                and voice_preset in self.speaker_embeddings
            ):
                voice_preset = self._load_voice_preset(voice_preset)
            else:
                if isinstance(voice_preset, str) and not voice_preset.endswith(".npz"):
                    voice_preset = voice_preset + ".npz"
                voice_preset = np.load(voice_preset)

        if voice_preset is not None:
            self._validate_voice_preset_dict(voice_preset, **kwargs)
            voice_preset = BatchFeature(data=voice_preset, tensor_type=return_tensors)

        encoded_text = self.tokenizer(
            text,
            return_tensors=return_tensors,
            padding="max_length",
            max_length=max_length,
            return_attention_mask=return_attention_mask,
            return_token_type_ids=return_token_type_ids,
            add_special_tokens=add_special_tokens,
            **kwargs,
        )

        if voice_preset is not None:
            encoded_text["history_prompt"] = voice_preset

        return encoded_text
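
# A minimal usage sketch of BarkProcessor, assuming a Bark checkpoint such as
# "suno/bark-small" and a preset name like "v2/en_speaker_6" (both names are
# assumptions for illustration, not taken from this file):
#
#   processor = BarkProcessor.from_pretrained("suno/bark-small")
#   inputs = processor("Hello, my dog is cute", voice_preset="v2/en_speaker_6")
#   # `inputs` holds the tokenized text plus a "history_prompt" BatchFeature.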
import requests
from bs4 import BeautifulSoup


def stock_price(symbol: str = "AAPL") -> str:
    url = f"https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    class_ = "My(6px) Pos(r) smartphone_Mt(6px)"
    return soup.find("div", class_=class_).find("span").text


if __name__ == "__main__":
    for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
        print(f"Current {symbol:<4} stock price is {stock_price(symbol):>8}")
from typing import List, Union
import numpy as np
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
    from PIL import Image

    from ..image_utils import load_image

if is_torch_available():
    import torch

    from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING

logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class DepthEstimationPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(MODEL_FOR_DEPTH_ESTIMATION_MAPPING)

    def __call__(self, images: Union[str, List[str], "Image.Image", List["Image.Image"]], **kwargs):
        return super().__call__(images, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        return {}, {}, {}

    def preprocess(self, image):
        image = load_image(image)
        self.image_size = image.size
        model_inputs = self.image_processor(images=image, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs):
        predicted_depth = model_outputs.predicted_depth
        prediction = torch.nn.functional.interpolate(
            predicted_depth.unsqueeze(1), size=self.image_size[::-1], mode="bicubic", align_corners=False
        )
        output = prediction.squeeze().cpu().numpy()
        formatted = (output * 255 / np.max(output)).astype("uint8")
        depth = Image.fromarray(formatted)

        output_dict = {}
        output_dict["predicted_depth"] = predicted_depth
        output_dict["depth"] = depth
        return output_dict
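
# A minimal usage sketch of DepthEstimationPipeline, assuming a depth-estimation
# checkpoint such as "Intel/dpt-large" (the model name and image URL are
# illustrative assumptions):
#
#   from transformers import pipeline
#   depth_estimator = pipeline("depth-estimation", model="Intel/dpt-large")
#   result = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
#   result["depth"]            # PIL image built in postprocess() above
#   result["predicted_depth"]  # raw torch tensor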
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/unispeech-large-1500h-cv": (
        "https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json"
    ),
    # See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}


class UniSpeechConfig(PretrainedConfig):
    model_type = "unispeech"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        feat_quantizer_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        num_ctc_classes=80,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        replace_prob=0.5,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.num_ctc_classes = num_ctc_classes
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # pretraining loss
        self.replace_prob = replace_prob

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
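
# A small worked example of `inputs_to_logits_ratio` above: with the default
# conv_stride of (5, 2, 2, 2, 2, 2, 2), the feature encoder downsamples the raw
# waveform by 5 * 2**6 = 320 samples per output frame.
#
#   config = UniSpeechConfig()
#   assert config.inputs_to_logits_ratio == 320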
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"""
),
},
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"""
),
},
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"""
),
},
}
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/dpr-ctx_encoder-single-nq-base""": 512,
"""facebook/dpr-ctx_encoder-multiset-base""": 512,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/dpr-question_encoder-single-nq-base""": 512,
"""facebook/dpr-question_encoder-multiset-base""": 512,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/dpr-reader-single-nq-base""": 512,
"""facebook/dpr-reader-multiset-base""": 512,
}
CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
"""facebook/dpr-ctx_encoder-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-ctx_encoder-multiset-base""": {"""do_lower_case""": True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
"""facebook/dpr-question_encoder-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-question_encoder-multiset-base""": {"""do_lower_case""": True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
"""facebook/dpr-reader-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-reader-multiset-base""": {"""do_lower_case""": True},
}
class DPRContextEncoderTokenizer(BertTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION


class DPRQuestionEncoderTokenizer(BertTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION


DPRSpanPrediction = collections.namedtuple(
    "DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)

DPRReaderOutput = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])
CUSTOM_DPR_READER_DOCSTRING = r"""
Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),
using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
with the format:
```
[CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>
```
Args:
questions (`str` or `List[str]`):
The questions to be encoded. You can specify one question for many passages. In this case, the question
will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
`titles` or `texts`.
titles (`str` or `List[str]`):
The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
texts (`str` or `List[str]`):
The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
Activates and controls padding. Accepts the following values:
- `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
is provided).
- `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
lengths).
truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
Activates and controls truncation. Accepts the following values:
- `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to
the maximum acceptable input length for the model if that argument is not provided. This will truncate
token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch
of pairs) is provided.
- `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the first
sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the
second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
greater than the model maximum admissible input size).
max_length (`int`, *optional*):
Controls the maximum length to use by one of the truncation/padding parameters.
If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
is required by one of the truncation/padding parameters. If the model has no specific maximum input
length (like XLNet) truncation/padding to a maximum length will be deactivated.
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors instead of list of python integers. Acceptable values are:
- `'tf'`: Return TensorFlow `tf.constant` objects.
- `'pt'`: Return PyTorch `torch.Tensor` objects.
- `'np'`: Return Numpy `np.ndarray` objects.
return_attention_mask (`bool`, *optional*):
Whether or not to return the attention mask. If not set, will return the attention mask according to the
specific tokenizer's default, defined by the `return_outputs` attribute.
[What are attention masks?](../glossary#attention-mask)
Returns:
`Dict[str, List[List[int]]]`: A dictionary with the following keys:
- `input_ids`: List of token ids to be fed to a model.
- `attention_mask`: List of indices specifying which tokens should be attended to by the model.
"""
@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class CustomDPRReaderTokenizerMixin:
    def __call__(
        self,
        questions,
        titles: Optional[str] = None,
        texts: Optional[str] = None,
        padding: Union[bool, str] = False,
        truncation: Union[bool, str] = False,
        max_length: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = None,
        **kwargs,
    ) -> BatchEncoding:
        if titles is None and texts is None:
            return super().__call__(
                questions,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        elif titles is None or texts is None:
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions,
                text_pair,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        titles = titles if not isinstance(titles, str) else [titles]
        texts = texts if not isinstance(texts, str) else [texts]
        n_passages = len(titles)
        questions = questions if not isinstance(questions, str) else [questions] * n_passages
        if len(titles) != len(texts):
            raise ValueError(
                f"There should be as many titles as texts but got {len(titles)} titles and {len(texts)} texts."
            )
        encoded_question_and_titles = super().__call__(questions, titles, padding=False, truncation=False)["input_ids"]
        encoded_texts = super().__call__(texts, add_special_tokens=False, padding=False, truncation=False)["input_ids"]
        encoded_inputs = {
            "input_ids": [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles, encoded_texts)
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
            encoded_inputs["attention_mask"] = attention_mask
        return self.pad(encoded_inputs, padding=padding, max_length=max_length, return_tensors=return_tensors)

    def decode_best_spans(
        self,
        reader_input: BatchEncoding,
        reader_output: DPRReaderOutput,
        num_spans: int = 16,
        max_answer_length: int = 64,
        num_spans_per_passage: int = 4,
    ) -> List[DPRSpanPrediction]:
        input_ids = reader_input["input_ids"]
        start_logits, end_logits, relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits)
        sorted_docs = sorted(range(n_passages), reverse=True, key=relevance_logits.__getitem__)
        nbest_spans_predictions: List[DPRSpanPrediction] = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id])
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id, 2) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id)
            else:
                sequence_len = len(sequence_ids)

            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len],
                end_logits=end_logits[doc_id][passage_offset:sequence_len],
                max_answer_length=max_answer_length,
                top_spans=num_spans_per_passage,
            )
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index],
                        relevance_score=relevance_logits[doc_id],
                        doc_id=doc_id,
                        start_index=start_index,
                        end_index=end_index,
                        text=self.decode(sequence_ids[start_index : end_index + 1]),
                    )
                )
            if len(nbest_spans_predictions) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]

    def _get_best_spans(
        self,
        start_logits,
        end_logits,
        max_answer_length: int,
        top_spans: int,
    ):
        scores = []
        for start_index, start_score in enumerate(start_logits):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
                scores.append(((start_index, start_index + answer_length), start_score + end_score))
        scores = sorted(scores, key=lambda x: x[1], reverse=True)
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            if start_index > end_index:
                raise ValueError(f"Wrong span indices: [{start_index}:{end_index}]")
            length = end_index - start_index + 1
            if length > max_answer_length:
                raise ValueError(f"Span is too long: {length} > {max_answer_length}")
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals
            ):
                continue
            chosen_span_intervals.append((start_index, end_index))

            if len(chosen_span_intervals) == top_spans:
                break
        return chosen_span_intervals


@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class DPRReaderTokenizer(CustomDPRReaderTokenizerMixin, BertTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
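
# A minimal usage sketch of DPRReaderTokenizer, using the
# "facebook/dpr-reader-single-nq-base" checkpoint named in the maps above (the
# question/passage strings and the model call are illustrative assumptions):
#
#   tokenizer = DPRReaderTokenizer.from_pretrained("facebook/dpr-reader-single-nq-base")
#   encoded_inputs = tokenizer(
#       questions=["What is love?"],
#       titles=["Haddaway"],
#       texts=["'What Is Love' is a song recorded by Haddaway."],
#       return_tensors="pt",
#   )
#   # After running a DPRReader model on `encoded_inputs`, spans are recovered with:
#   #   predicted_spans = tokenizer.decode_best_spans(encoded_inputs, model_outputs)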
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import subprocess
from packaging.version import Version, parse
from accelerate.commands.config.config_args import default_config_file, load_config_from_file
_description = "Run commands across TPU VMs for initial setup before running `accelerate launch`."


def tpu_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("tpu-config", description=_description)
    else:
        parser = argparse.ArgumentParser("Accelerate tpu-config command", description=_description)
    # Core arguments
    config_args = parser.add_argument_group(
        "Config Arguments", "Arguments that can be configured through `accelerate config`."
    )
    config_args.add_argument(
        "--config_file", type=str, default=None, help="Path to the config file to use for accelerate."
    )
    config_args.add_argument(
        "--tpu_name",
        default=None,
        help="The name of the TPU to use. If not specified, will use the TPU specified in the config file.",
    )
    config_args.add_argument(
        "--tpu_zone",
        default=None,
        help="The zone of the TPU to use. If not specified, will use the zone specified in the config file.",
    )
    pod_args = parser.add_argument_group("TPU Arguments", "Arguments for options ran inside the TPU.")
    pod_args.add_argument(
        "--use_alpha",
        action="store_true",
        help="Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`.",
    )
    pod_args.add_argument(
        "--command_file",
        default=None,
        help="The path to the file containing the commands to run on the pod on startup.",
    )
    pod_args.add_argument(
        "--command", action="append", nargs="+", help="A command to run on the pod. Can be passed multiple times."
    )
    pod_args.add_argument(
        "--install_accelerate",
        action="store_true",
        help="Whether to install accelerate on the pod. Defaults to False.",
    )
    pod_args.add_argument(
        "--accelerate_version",
        default="latest",
        help="The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify 'dev' to install from GitHub.",
    )
    pod_args.add_argument(
        "--debug", action="store_true", help="If set, will print the command that would be run instead of running it."
    )

    if subparsers is not None:
        parser.set_defaults(func=tpu_command_launcher)
    return parser


def tpu_command_launcher(args):
    defaults = None

    # Get the default from the config file if it exists.
    if args.config_file is not None or os.path.isfile(default_config_file):
        defaults = load_config_from_file(args.config_file)
        if not args.command_file and defaults.command_file is not None and not args.command:
            args.command_file = defaults.command_file
        if not args.command and defaults.commands is not None:
            args.command = defaults.commands
        if not args.tpu_name:
            args.tpu_name = defaults.tpu_name
        if not args.tpu_zone:
            args.tpu_zone = defaults.tpu_zone
    if args.accelerate_version == "dev":
        args.accelerate_version = "git+https://github.com/huggingface/accelerate.git"
    elif args.accelerate_version == "latest":
        args.accelerate_version = "accelerate -U"
    elif isinstance(parse(args.accelerate_version), Version):
        args.accelerate_version = f"accelerate=={args.accelerate_version}"

    if not args.command_file and not args.command:
        raise ValueError("You must specify either a command file or a command to run on the pod.")

    if args.command_file:
        with open(args.command_file, "r") as f:
            args.command = [f.read().splitlines()]

    # To turn list of lists into list of strings
    if isinstance(args.command[0], list):
        args.command = [line for cmd in args.command for line in cmd]
    # Default to the shared folder and install accelerate
    new_cmd = ["cd /usr/share"]
    if args.install_accelerate:
        new_cmd += [f"pip install {args.accelerate_version}"]
    new_cmd += args.command
    args.command = "; ".join(new_cmd)

    # Then send it to gcloud
    # Eventually try to use google-api-core to do this instead of subprocess
    cmd = ["gcloud"]
    if args.use_alpha:
        cmd += ["alpha"]
    cmd += [
        "compute",
        "tpus",
        "tpu-vm",
        "ssh",
        args.tpu_name,
        "--zone",
        args.tpu_zone,
        "--command",
        args.command,
        "--worker",
        "all",
    ]
    if args.debug:
        print(f"Running {' '.join(cmd)}")
        return
    subprocess.run(cmd)
    print("Successfully setup pod.")


def main():
    parser = tpu_command_parser()
    args = parser.parse_args()
    tpu_command_launcher(args)
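
# A minimal usage sketch, assuming this parser is registered as the
# `tpu-config` subcommand of the `accelerate` CLI (the TPU name and zone are
# placeholders):
#
#   accelerate tpu-config --tpu_name my-tpu --tpu_zone us-central1-a \
#       --command "echo hello" --install_accelerate --debug
#
# With --debug, the assembled `gcloud compute tpus tpu-vm ssh ...` invocation
# is printed instead of executed.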
import argparse
import collections

import numpy as np
import torch
from flax import traverse_util
from t5x import checkpoints

from transformers import MT5Config, UMT5EncoderModel, UMT5ForConditionalGeneration
from transformers.utils import logging


logging.set_verbosity_info()


def t5x_relpos_bias_lookup(params, i, prefix):
    """Returns the relative position bias parameters of a layer. Does not transpose."""
    return params[f"{prefix}/{prefix}/relpos_bias/rel_embedding"][:, i, :]


def t5x_attention_lookup(params, i, prefix, layer_name="attention"):
    """Returns the KOQV parameters of (self-)attention. Does not transpose."""
    k_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/key/kernel"][:, i, :, :])
    k = k_tmp.reshape(k_tmp.shape[0], k_tmp.shape[1] * k_tmp.shape[2])
    o_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/out/kernel"][:, i, :, :])
    o = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1], o_tmp.shape[2])
    q_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/query/kernel"][:, i, :, :])
    q = q_tmp.reshape(q_tmp.shape[0], q_tmp.shape[1] * q_tmp.shape[2])
    v_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/value/kernel"][:, i, :, :])
    v = v_tmp.reshape(v_tmp.shape[0], v_tmp.shape[1] * v_tmp.shape[2])
    return k, o, q, v


def t5x_mlp_lookup(params, i, prefix, split_mlp_wi=False):
    """Returns the MLP parameters of a layer. Does not transpose."""
    if split_mlp_wi:
        wi_0 = params[f"{prefix}/{prefix}/mlp/wi_0/kernel"][:, i, :]
        wi_1 = params[f"{prefix}/{prefix}/mlp/wi_1/kernel"][:, i, :]
        wi = (wi_0, wi_1)
    else:
        wi = params[f"{prefix}/{prefix}/mlp/wi/kernel"][:, i, :]
    wo = params[f"{prefix}/{prefix}/mlp/wo/kernel"][:, i, :]
    return wi, wo


def t5x_layer_norm_lookup(params, i, prefix, layer_name):
    """Returns the layer norm parameters of a layer."""
    return params[f"{prefix}/{prefix}/{layer_name}/scale"][:, i]


def convert_t5x_to_pytorch(variables, *, num_layers, is_encoder_only, scalable_attention=False):
    """Converts the parameters from T5X-Flax to Transformers-PyTorch."""
    old = traverse_util.flatten_dict(variables["target"])
    old = {"/".join(k): v for k, v in old.items()}

    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
    split_mlp_wi = "encoder/encoder/mlp/wi_0/kernel" in old
    print("Split MLP:", split_mlp_wi)

    new = collections.OrderedDict()

    # Shared embeddings.
    new["shared.weight"] = old["token_embedder/embedding"]

    # Encoder.
    for i in range(num_layers):
        # Block i, layer 0 (Self Attention).
        layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_attention_layer_norm")
        k, o, q, v = t5x_attention_lookup(old, i, "encoder", "attention")
        new[f"encoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
        new[f"encoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

        # Block i, layer 1 (MLP).
        layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_mlp_layer_norm")
        wi, wo = t5x_mlp_lookup(old, i, "encoder", split_mlp_wi)
        new[f"encoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
        if split_mlp_wi:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_0.weight"] = wi[0].T
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_1.weight"] = wi[1].T
        else:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi.weight"] = wi.T
        new[f"encoder.block.{i}.layer.1.DenseReluDense.wo.weight"] = wo.T
        if scalable_attention:
            # convert the rel_embedding of each layer
            new[f"encoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
                old, i, "encoder"
            ).T

    new["encoder.final_layer_norm.weight"] = old["encoder/encoder_norm/scale"]

    if not scalable_attention:
        new["encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
            old, 0, "encoder"
        ).T
        new["decoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
            old, 0, "decoder"
        ).T

    if not is_encoder_only:
        # Decoder.
        for i in range(num_layers):
            # Block i, layer 0 (Self Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_self_attention_layer_norm")
            k, o, q, v = t5x_attention_lookup(old, i, "decoder", "self_attention")
            new[f"decoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

            # Block i, layer 1 (Cross Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_cross_attention_layer_norm")
            k, o, q, v = t5x_attention_lookup(old, i, "decoder", "encoder_decoder_attention")
            new[f"decoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.1.EncDecAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.v.weight"] = v.T

            # Block i, layer 2 (MLP).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_mlp_layer_norm")
            wi, wo = t5x_mlp_lookup(old, i, "decoder", split_mlp_wi)
            new[f"decoder.block.{i}.layer.2.layer_norm.weight"] = layer_norm
            if split_mlp_wi:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_0.weight"] = wi[0].T
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_1.weight"] = wi[1].T
            else:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi.weight"] = wi.T
            new[f"decoder.block.{i}.layer.2.DenseReluDense.wo.weight"] = wo.T

            if scalable_attention:
                # convert the rel_embedding of each layer
                new[
                    f"decoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight"
                ] = t5x_relpos_bias_lookup(old, i, "decoder").T

        new["decoder.final_layer_norm.weight"] = old["decoder/decoder_norm/scale"]

        # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
        if "decoder/logits_dense/kernel" in old:
            new["lm_head.weight"] = old["decoder/logits_dense/kernel"].T

    return new


def make_state_dict(converted_params, is_encoder_only):
    """Prepares a state dict for the PyTorch model."""
    state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy())) for (k, v) in converted_params.items()])

    # Add what is missing.
    if "encoder.embed_tokens.weight" not in state_dict:
        state_dict["encoder.embed_tokens.weight"] = state_dict["shared.weight"]

    if not is_encoder_only:
        if "decoder.embed_tokens.weight" not in state_dict:
            state_dict["decoder.embed_tokens.weight"] = state_dict["shared.weight"]

        if "lm_head.weight" not in state_dict:  # For old 1.0 models.
            print("Using shared word embeddings as lm_head.")
            state_dict["lm_head.weight"] = state_dict["shared.weight"]

    return state_dict


def load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only, scalable_attention):
    """Replaces the params in model with the T5X converted params."""
    variables = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
    converted = convert_t5x_to_pytorch(
        variables, num_layers=config.num_layers, is_encoder_only=is_encoder_only, scalable_attention=scalable_attention
    )
    state_dict = make_state_dict(converted, is_encoder_only)
    model.load_state_dict(state_dict, strict=True)


def convert_t5x_checkpoint_to_pytorch(
    t5x_checkpoint_path, config_file, pytorch_dump_path, is_encoder_only=False, scalable_attention=False
):
    """Loads the config and model, converts the T5X checkpoint, and saves a PyTorch checkpoint."""
    config = MT5Config.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = UMT5EncoderModel(config)
    else:
        model = UMT5ForConditionalGeneration(config)

    # Load weights from tf checkpoint
    load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only, scalable_attention)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)

    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path)
    print("Done")
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Converts a native T5X checkpoint into a PyTorch checkpoint.")
    # Required parameters
    parser.add_argument(
        "--t5x_checkpoint_path", default=None, type=str, required=True, help="Path to the T5X checkpoint."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.",
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--is_encoder_only", action="store_true", help="Check if the model is encoder-decoder model", default=False
    )
    parser.add_argument(
        "--scalable_attention",
        action="store_true",
        help="Whether the model uses scaled attention (umt5 model)",
        default=False,
    )
    args = parser.parse_args()
    convert_t5x_checkpoint_to_pytorch(
        args.t5x_checkpoint_path,
        args.config_file,
        args.pytorch_dump_path,
        args.is_encoder_only,
        args.scalable_attention,
    )
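
# A minimal usage sketch of the converter above (the script filename and all
# paths are placeholders):
#
#   python <this_script>.py \
#       --t5x_checkpoint_path /path/to/t5x_checkpoint \
#       --config_file /path/to/config.json \
#       --pytorch_dump_path /path/to/output \
#       --scalable_attention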
import re
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class DonutProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "AutoImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor
        self._in_target_context_manager = False

    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        images = kwargs.pop("images", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            images = args[0]
            args = args[1:]

        if images is None and text is None:
            raise ValueError("You need to specify either an `images` or `text` input to process.")

        if images is not None:
            inputs = self.image_processor(images, *args, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your images inputs, or in a separate call."
        )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.image_processor
        self._in_target_context_manager = False

    def token2json(self, tokens, is_inner_value=False, added_vocab=None):
        """Convert a (generated) token sequence into an ordered JSON format."""
        if added_vocab is None:
            added_vocab = self.tokenizer.get_added_vocab()

        output = {}

        while tokens:
            start_token = re.search(r"<s_(.*?)>", tokens, re.IGNORECASE)
            if start_token is None:
                break
            key = start_token.group(1)
            end_token = re.search(rf"</s_{key}>", tokens, re.IGNORECASE)
            start_token = start_token.group()
            if end_token is None:
                tokens = tokens.replace(start_token, "")
            else:
                end_token = end_token.group()
                start_token_escaped = re.escape(start_token)
                end_token_escaped = re.escape(end_token)
                content = re.search(f"{start_token_escaped}(.*?){end_token_escaped}", tokens, re.IGNORECASE)
                if content is not None:
                    content = content.group(1).strip()
                    if r"<s_" in content and r"</s_" in content:  # non-leaf node
                        value = self.token2json(content, is_inner_value=True, added_vocab=added_vocab)
                        if value:
                            if len(value) == 1:
                                value = value[0]
                            output[key] = value
                    else:  # leaf nodes
                        output[key] = []
                        for leaf in content.split(r"<sep/>"):
                            leaf = leaf.strip()
                            if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
                                leaf = leaf[1:-2]  # for categorical special tokens
                            output[key].append(leaf)
                        if len(output[key]) == 1:
                            output[key] = output[key][0]

                tokens = tokens[tokens.find(end_token) + len(end_token) :].strip()
                if tokens[:6] == r"<sep/>":  # non-leaf nodes
                    return [output] + self.token2json(tokens[6:], is_inner_value=True, added_vocab=added_vocab)

        if len(output):
            return [output] if is_inner_value else output
        else:
            return [] if is_inner_value else {"text_sequence": tokens}

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
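
# A small worked example of token2json() above (the token string is an
# illustrative assumption; real sequences come from a Donut-style decoder):
#
#   processor.token2json("<s_menu><s_name>Latte</s_name><s_price>4.50</s_price></s_menu>")
#   # -> {"menu": {"name": "Latte", "price": "4.50"}}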
"""simple docstring"""
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_gpta import GPTaTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
snake_case = logging.get_logger(__name__)
snake_case = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "gpt2": "https://huggingface.co/gpt2/resolve/main/vocab.json",
        "gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/vocab.json",
        "gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/vocab.json",
        "gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/vocab.json",
        "distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/vocab.json",
    },
    "merges_file": {
        "gpt2": "https://huggingface.co/gpt2/resolve/main/merges.txt",
        "gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/merges.txt",
        "gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/merges.txt",
        "gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/merges.txt",
        "distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "gpt2": "https://huggingface.co/gpt2/resolve/main/tokenizer.json",
        "gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json",
        "gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/tokenizer.json",
        "gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json",
        "distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/tokenizer.json",
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "gpt2": 1024,
    "gpt2-medium": 1024,
    "gpt2-large": 1024,
    "gpt2-xl": 1024,
    "distilgpt2": 1024,
}
class GPT2TokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = GPT2Tokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )
        self.add_bos_token = kwargs.pop("add_bos_token", False)

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        """This corresponds to DialoGPT variants of models."""
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])

        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
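
# A minimal usage sketch, assuming the "gpt2" checkpoint listed in the maps
# above (token ids shown for illustration):
#
#   tokenizer = GPT2TokenizerFast.from_pretrained("gpt2")
#   tokenizer("Hello world")["input_ids"]  # [15496, 995]
#   # Pretokenized input requires add_prefix_space=True:
#   tokenizer = GPT2TokenizerFast.from_pretrained("gpt2", add_prefix_space=True)
#   tokenizer(["Hello", "world"], is_split_into_words=True)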
"""simple docstring"""
from math import sqrt
def snake_case ( lowerCAmelCase_ = 1000000 ) -> int:
_snake_case = 0
_snake_case = 0
_snake_case = 42
while num_cuboids <= limit:
max_cuboid_size += 1
for sum_shortest_sides in range(2 , 2 * max_cuboid_size + 1 ):
if sqrt(sum_shortest_sides**2 + max_cuboid_size**2 ).is_integer():
num_cuboids += (
min(lowerCAmelCase_ , sum_shortest_sides // 2 )
- max(1 , sum_shortest_sides - max_cuboid_size )
+ 1
)
return max_cuboid_size
if __name__ == "__main__":
print(F"{solution() = }")
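
# Sanity check taken from the Project Euler 86 problem statement: M = 100 is
# the least value for which the number of integer-shortest-path cuboids first
# exceeds 2,000 (1,975 solutions exist at M = 99), so:
#
#   assert solution(2000) == 100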
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
UpperCAmelCase = {
'''configuration_wav2vec2''': ['''WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Wav2Vec2Config'''],
'''feature_extraction_wav2vec2''': ['''Wav2Vec2FeatureExtractor'''],
'''processing_wav2vec2''': ['''Wav2Vec2Processor'''],
'''tokenization_wav2vec2''': ['''Wav2Vec2CTCTokenizer''', '''Wav2Vec2Tokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase = [
'''WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Wav2Vec2ForAudioFrameClassification''',
'''Wav2Vec2ForCTC''',
'''Wav2Vec2ForMaskedLM''',
'''Wav2Vec2ForPreTraining''',
'''Wav2Vec2ForSequenceClassification''',
'''Wav2Vec2ForXVector''',
'''Wav2Vec2Model''',
'''Wav2Vec2PreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase = [
'''TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFWav2Vec2ForCTC''',
'''TFWav2Vec2Model''',
'''TFWav2Vec2PreTrainedModel''',
'''TFWav2Vec2ForSequenceClassification''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase = [
'''FlaxWav2Vec2ForCTC''',
'''FlaxWav2Vec2ForPreTraining''',
'''FlaxWav2Vec2Model''',
'''FlaxWav2Vec2PreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_wavaveca import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, WavaVecaConfig
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .processing_wavaveca import WavaVecaProcessor
from .tokenization_wavaveca import WavaVecaCTCTokenizer, WavaVecaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavaveca import (
WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
WavaVecaForAudioFrameClassification,
WavaVecaForCTC,
WavaVecaForMaskedLM,
WavaVecaForPreTraining,
WavaVecaForSequenceClassification,
WavaVecaForXVector,
WavaVecaModel,
WavaVecaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_wavaveca import (
TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWavaVecaForCTC,
TFWavaVecaForSequenceClassification,
TFWavaVecaModel,
TFWavaVecaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_wavaveca import (
FlaxWavaVecaForCTC,
FlaxWavaVecaForPreTraining,
FlaxWavaVecaModel,
FlaxWavaVecaPreTrainedModel,
)
else:
import sys
UpperCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
"""simple docstring"""
from __future__ import annotations
from itertools import permutations
from random import randint
from timeit import repeat
def lowerCamelCase () -> tuple[list[int], int]:
lowercase :Any = [randint(-1000 , 1000) for i in range(10)]
lowercase :Any = randint(-5000 , 5000)
return (arr, r)
UpperCAmelCase = make_dataset()
def lowerCamelCase (a_ :list[int] , a_ :int) -> tuple[int, ...]:
for triplet in permutations(a_ , 3):
if sum(a_) == target:
return tuple(sorted(a_))
return (0, 0, 0)
def lowerCamelCase (a_ :list[int] , a_ :int) -> tuple[int, int, int]:
arr.sort()
lowercase :Union[str, Any] = len(a_)
for i in range(n - 1):
lowercase , lowercase :Union[str, Any] = i + 1, n - 1
while left < right:
if arr[i] + arr[left] + arr[right] == target:
return (arr[i], arr[left], arr[right])
elif arr[i] + arr[left] + arr[right] < target:
left += 1
elif arr[i] + arr[left] + arr[right] > target:
right -= 1
return (0, 0, 0)
def lowerCamelCase () -> tuple[float, float]:
lowercase :int = '''
from __main__ import dataset, triplet_sum1, triplet_sum2
'''
lowercase :Optional[Any] = '''
triplet_sum1(*dataset)
'''
lowercase :Union[str, Any] = '''
triplet_sum2(*dataset)
'''
lowercase :Dict = repeat(setup=a_ , stmt=a_ , repeat=5 , number=1_0000)
lowercase :Optional[int] = repeat(setup=a_ , stmt=a_ , repeat=5 , number=1_0000)
return (min(a_), min(a_))
if __name__ == "__main__":
from doctest import testmod
testmod()
UpperCAmelCase = solution_times()
print(F"""The time for naive implementation is {times[0]}.""")
print(F"""The time for optimized implementation is {times[1]}.""")
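
# A small worked example of the two implementations above (inputs chosen by
# hand, not drawn from the random benchmark dataset):
#
#   triplet_sum1([5, 2, 4, 1, 3], 9)  # -> (1, 3, 5)
#   triplet_sum2([5, 2, 4, 1, 3], 9)  # -> (1, 3, 5)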
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to properly calculate the metrics on the
# validation dataset when in a distributed system, and builds off the
# `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")
    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE
    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)
    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)
    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)
    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer, num_warmup_steps=100, num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
    )
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler)
    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
        model.eval()
        samples_seen = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather((predictions, batch["labels"]))
            # New Code #
            # First we check if it's a distributed system
            if accelerator.use_distributed:
                # Then see if we're on the last batch of our eval dataloader
                if step == len(eval_dataloader) - 1:
                    # Last batch needs to be truncated on distributed systems as it contains additional samples
                    predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                    references = references[: len(eval_dataloader.dataset) - samples_seen]
                else:
                    # Otherwise we add the number of samples seen
                    samples_seen += references.shape[0]
            # All of this can be avoided if you use `Accelerator.gather_for_metrics` instead of `Accelerator.gather`:
            # accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions, references=references,
            )
        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision", type=str, default=None, choices=["no", "fp16", "bf16", "fp8"], help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
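# A minimal sketch (illustrative addition, not part of the original script):
# `Accelerator.gather_for_metrics` performs the last-batch truncation shown
# above automatically, so the eval loop can be reduced to:
#
#     predictions, references = accelerator.gather_for_metrics(
#         (predictions, batch["labels"])
#     )
#     metric.add_batch(predictions=predictions, references=references)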
| 162
|
def longest_distance(graph) -> None:
    # Kahn's algorithm: process vertices in topological order and relax the
    # longest-path estimate of every successor.
    indegree = [0] * len(graph)
    queue = []
    long_dist = [1] * len(graph)
    for values in graph.values():
        for i in values:
            indegree[i] += 1
    for i in range(len(graph)):
        if indegree[i] == 0:
            queue.append(i)
    while queue:
        vertex = queue.pop(0)
        for x in graph[vertex]:
            indegree[x] -= 1
            if long_dist[vertex] + 1 > long_dist[x]:
                long_dist[x] = long_dist[vertex] + 1
            if indegree[x] == 0:
                queue.append(x)
    print(max(long_dist))
# Adjacency list of Graph
graph = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
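# Note (illustrative addition): `queue.pop(0)` on a list is O(n); for large
# graphs a collections.deque gives O(1) pops from the left, e.g.:
#
#     from collections import deque
#     queue = deque()
#     ...
#     vertex = queue.popleft()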
| 235
| 0
|
import math
import random
def sigmoid_function(value: float, deriv: bool = False) -> float:
    """Return the sigmoid of `value`, or its derivative if `deriv` is True."""
    if deriv:
        return value * (1 - value)
    return 1 / (1 + math.exp(-value))
# Initial Value
INITIAL_VALUE = 0.02
def forward_propagation(expected: int, number_propagations: int) -> float:
    """Train a single weight by repeated forward passes and weight updates."""
    # Random starting weight
    weight = float(2 * (random.randint(1, 100)) - 1)
    for _ in range(number_propagations):
        # Forward propagation
        layer_1 = sigmoid_function(INITIAL_VALUE * weight)
        # How much did we miss?
        layer_1_error = (expected / 100) - layer_1
        # Error delta
        layer_1_delta = layer_1_error * sigmoid_function(layer_1, deriv=True)
        # Update weight
        weight += INITIAL_VALUE * layer_1_delta
    return layer_1 * 100
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    expected = int(input("Expected value: "))
    number_propagations = int(input("Number of propagations: "))
    print(forward_propagation(expected, number_propagations))
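# Worked identity (illustrative addition): with s = sigmoid(x), the sigmoid's
# derivative is s * (1 - s), which is exactly what the `deriv` branch computes
# from an already-activated value:
#
#     s = sigmoid_function(0.0)                         # 0.5
#     assert sigmoid_function(s, deriv=True) == 0.25    # 0.5 * (1 - 0.5)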
| 708
|
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
def get_job_links(workflow_run_id, token=None):
    """Extract job names and their links in a GitHub Actions workflow run."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}
    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
    result = requests.get(url, headers=headers).json()
    job_links = {}
    try:
        job_links.update({job["name"]: job["html_url"] for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)
        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            job_links.update({job["name"]: job["html_url"] for job in result["jobs"]})
        return job_links
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")
    return {}
def get_artifacts_links(workflow_run_id, token=None):
    """Get all artifact links in a workflow run."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}
    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/artifacts?per_page=100"
    result = requests.get(url, headers=headers).json()
    artifacts = {}
    try:
        artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)
        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]})
        return artifacts
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")
    return {}
def download_artifact(artifact_name, artifact_url, output_dir, token):
    """Download a GitHub Actions artifact zip to `output_dir`."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}
    result = requests.get(artifact_url, headers=headers, allow_redirects=False)
    download_url = result.headers["Location"]
    response = requests.get(download_url, allow_redirects=True)
    file_path = os.path.join(output_dir, f"{artifact_name}.zip")
    with open(file_path, "wb") as fp:
        fp.write(response.content)
def get_errors_from_single_artifact(artifact_zip_path, job_links=None):
    """Extract errors from a downloaded artifact (in .zip format)."""
    errors = []
    failed_tests = []
    job_name = None
    with zipfile.ZipFile(artifact_zip_path) as z:
        for filename in z.namelist():
            if not os.path.isdir(filename):
                # read the file
                if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
                    with z.open(filename) as f:
                        for line in f:
                            line = line.decode("UTF-8").strip()
                            if filename == "failures_line.txt":
                                try:
                                    # `error_line` is the place where `error` occurs
                                    error_line = line[: line.index(": ")]
                                    error = line[line.index(": ") + len(": ") :]
                                    errors.append([error_line, error])
                                except Exception:
                                    # skip un-related lines
                                    pass
                            elif filename == "summary_short.txt" and line.startswith("FAILED "):
                                # `test` is the test method that failed
                                test = line[len("FAILED ") :]
                                failed_tests.append(test)
                            elif filename == "job_name.txt":
                                job_name = line
    if len(errors) != len(failed_tests):
        raise ValueError(
            f"`errors` and `failed_tests` should have the same number of elements. Got {len(errors)} for `errors` "
            f"and {len(failed_tests)} for `failed_tests` instead. The test reports in {artifact_zip_path} have some"
            " problem.")
    job_link = None
    if job_name and job_links:
        job_link = job_links.get(job_name, None)
    # A list with elements of the form (line of error, error, failed test)
    result = [x + [y] + [job_link] for x, y in zip(errors, failed_tests)]
    return result
def get_all_errors(artifact_dir, job_links=None):
    """Extract errors from all artifact files in `artifact_dir`."""
    errors = []
    paths = [os.path.join(artifact_dir, p) for p in os.listdir(artifact_dir) if p.endswith(".zip")]
    for p in paths:
        errors.extend(get_errors_from_single_artifact(p, job_links=job_links))
    return errors
def reduce_by_error(logs, error_filter=None):
    """Count occurrences of each error."""
    counter = Counter()
    counter.update([x[1] for x in logs])
    counts = counter.most_common()
    r = {}
    for error, count in counts:
        if error_filter is None or error not in error_filter:
            r[error] = {"count": count, "failed_tests": [(x[2], x[0]) for x in logs if x[1] == error]}
    r = dict(sorted(r.items(), key=lambda item: item[1]["count"], reverse=True))
    return r
def get_model(test):
    """Get the model name from a test method path."""
    test = test.split("::")[0]
    if test.startswith("tests/models/"):
        model = test.split("/")[2]
    else:
        model = None
    return model
def reduce_by_model(logs, error_filter=None):
    """Count occurrences of each error per model."""
    logs = [(x[0], x[1], get_model(x[2])) for x in logs]
    logs = [x for x in logs if x[2] is not None]
    tests = {x[2] for x in logs}
    r = {}
    for test in tests:
        counter = Counter()
        # count by errors in `test`
        counter.update([x[1] for x in logs if x[2] == test])
        counts = counter.most_common()
        error_counts = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
        n_errors = sum(error_counts.values())
        if n_errors > 0:
            r[test] = {"count": n_errors, "errors": error_counts}
    r = dict(sorted(r.items(), key=lambda item: item[1]["count"], reverse=True))
    return r
def make_github_table(reduced_by_error):
    header = "| no. | error | status |"
    sep = "|-:|:-|:-|"
    lines = [header, sep]
    for error in reduced_by_error:
        count = reduced_by_error[error]["count"]
        line = f"| {count} | {error[:100]} | |"
        lines.append(line)
    return "\n".join(lines)
def make_github_table_per_model(reduced_by_model):
    header = "| model | no. of errors | major error | count |"
    sep = "|-:|-:|-:|-:|"
    lines = [header, sep]
    for model in reduced_by_model:
        count = reduced_by_model[model]["count"]
        error, _count = list(reduced_by_model[model]["errors"].items())[0]
        line = f"| {model} | {count} | {error[:60]} | {_count} |"
        lines.append(line)
    return "\n".join(lines)
if __name__ == "__main__":
snake_case_ : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
parser.add_argument(
"--output_dir",
type=str,
required=True,
help="Where to store the downloaded artifacts and other result files.",
)
parser.add_argument("--token", default=None, type=str, help="A token that has actions:read permission.")
snake_case_ : Union[str, Any] = parser.parse_args()
os.makedirs(args.output_dir, exist_ok=True)
snake_case_ : Dict = get_job_links(args.workflow_run_id, token=args.token)
snake_case_ : Dict = {}
# To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
# For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
if _job_links:
for k, v in _job_links.items():
# This is how GitHub actions combine job names.
if " / " in k:
snake_case_ : List[Any] = k.find(" / ")
snake_case_ : List[str] = k[index + len(" / ") :]
snake_case_ : Optional[int] = v
with open(os.path.join(args.output_dir, "job_links.json"), "w", encoding="UTF-8") as fp:
json.dump(job_links, fp, ensure_ascii=False, indent=4)
snake_case_ : Optional[int] = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, "artifacts.json"), "w", encoding="UTF-8") as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
for idx, (name, url) in enumerate(artifacts.items()):
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
snake_case_ : Optional[Any] = get_all_errors(args.output_dir, job_links=job_links)
# `e[1]` is the error
snake_case_ : str = Counter()
counter.update([e[1] for e in errors])
# print the top 30 most common test errors
snake_case_ : Dict = counter.most_common(30)
for item in most_common:
print(item)
with open(os.path.join(args.output_dir, "errors.json"), "w", encoding="UTF-8") as fp:
json.dump(errors, fp, ensure_ascii=False, indent=4)
snake_case_ : str = reduce_by_error(errors)
snake_case_ : Optional[Any] = reduce_by_model(errors)
snake_case_ : int = make_github_table(reduced_by_error)
snake_case_ : Optional[int] = make_github_table_per_model(reduced_by_model)
with open(os.path.join(args.output_dir, "reduced_by_error.txt"), "w", encoding="UTF-8") as fp:
fp.write(sa)
with open(os.path.join(args.output_dir, "reduced_by_model.txt"), "w", encoding="UTF-8") as fp:
fp.write(sa)
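# Example invocation (illustrative; the module path is an assumption):
#
#     python utils/get_ci_error_statistics.py \
#         --workflow_run_id 123456789 \
#         --output_dir ci_errors \
#         --token $GITHUB_TOKEN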
| 169
| 0
|
from argparse import ArgumentParser, Namespace
from ..utils import logging
from . import BaseTransformersCLICommand
def convert_command_factory(args: Namespace):
    """Factory function used to convert a model TF checkpoint into a PyTorch checkpoint."""
    return ConvertCommand(
        args.model_type, args.tf_checkpoint, args.pytorch_dump_output, args.config, args.finetuning_task_name)
IMPORT_ERROR_MESSAGE = "\ntransformers can only be used from the commandline to convert TensorFlow models in PyTorch, In that case, it requires\nTensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.\n"
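# Example usage of the `convert` command registered below (illustrative; the
# paths are placeholders):
#
#     transformers-cli convert --model_type bert \
#         --tf_checkpoint ./bert_model.ckpt \
#         --config ./bert_config.json \
#         --pytorch_dump_output ./pytorch_model.bin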
class ConvertCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        train_parser = parser.add_parser(
            "convert", help="CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints.",
        )
        train_parser.add_argument("--model_type", type=str, required=True, help="Model's type.")
        train_parser.add_argument(
            "--tf_checkpoint", type=str, required=True, help="TensorFlow checkpoint path or folder.")
        train_parser.add_argument(
            "--pytorch_dump_output", type=str, required=True, help="Path to the PyTorch saved model output.")
        train_parser.add_argument("--config", type=str, default="", help="Configuration file path or folder.")
        train_parser.add_argument(
            "--finetuning_task_name", type=str, default=None, help="Optional fine-tuning task name if the TF model was a finetuned model.",
        )
        train_parser.set_defaults(func=convert_command_factory)
    def __init__(self, model_type: str, tf_checkpoint: str, pytorch_dump_output: str, config: str, finetuning_task_name: str, *args):
        self._logger = logging.get_logger("transformers-cli/converting")
        self._logger.info(f"Loading model {model_type}")
        self._model_type = model_type
        self._tf_checkpoint = tf_checkpoint
        self._pytorch_dump_output = pytorch_dump_output
        self._config = config
        self._finetuning_task_name = finetuning_task_name
    def run(self):
if self._model_type == "albert":
try:
from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output)
elif self._model_type == "bert":
try:
from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output)
elif self._model_type == "funnel":
try:
from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output)
elif self._model_type == "t5":
try:
from ..models.ta.convert_ta_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output)
elif self._model_type == "gpt":
from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import (
convert_openai_checkpoint_to_pytorch,
)
convert_openai_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output)
elif self._model_type == "transfo_xl":
try:
from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import (
convert_transfo_xl_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
            if "ckpt" in self._tf_checkpoint.lower():
                tf_checkpoint = self._tf_checkpoint
                tf_dataset_file = ""
            else:
                tf_dataset_file = self._tf_checkpoint
                tf_checkpoint = ""
            convert_transfo_xl_checkpoint_to_pytorch(
                tf_checkpoint, self._config, self._pytorch_dump_output, tf_dataset_file)
elif self._model_type == "gpt2":
try:
from ..models.gpta.convert_gpta_original_tf_checkpoint_to_pytorch import (
convert_gpta_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_gpta_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output)
elif self._model_type == "xlnet":
try:
from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import (
convert_xlnet_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_xlnet_checkpoint_to_pytorch(
self._tf_checkpoint , self._config , self._pytorch_dump_output , self._finetuning_task_name)
elif self._model_type == "xlm":
from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import (
convert_xlm_checkpoint_to_pytorch,
)
convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output)
elif self._model_type == "lxmert":
from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import (
convert_lxmert_checkpoint_to_pytorch,
)
convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output)
elif self._model_type == "rembert":
from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import (
convert_rembert_tf_checkpoint_to_pytorch,
)
convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output)
else:
            raise ValueError(
                "--model_type should be selected in the list [albert, bert, funnel, gpt, gpt2, t5, transfo_xl, xlnet, xlm, lxmert, rembert]")
| 631
|
from __future__ import annotations
Matrix = list[list[int]]
# assigning initial values to the grid
initial_grid = [
[3, 0, 6, 5, 0, 8, 4, 0, 0],
[5, 2, 0, 0, 0, 0, 0, 0, 0],
[0, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
# a grid with no solution
no_solution = [
[5, 0, 6, 5, 0, 8, 4, 0, 3],
[5, 2, 0, 0, 0, 0, 0, 0, 2],
[1, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def is_safe(grid: Matrix, row: int, column: int, n: int) -> bool:
    """Check whether digit `n` can be placed at (row, column)."""
    for i in range(9):
        if grid[row][i] == n or grid[i][column] == n:
            return False
    for i in range(3):
        for j in range(3):
            if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
                return False
    return True
def find_empty_location(grid: Matrix) -> tuple[int, int] | None:
    """Return the coordinates of the first empty cell, or None if the grid is full."""
    for i in range(9):
        for j in range(9):
            if grid[i][j] == 0:
                return i, j
    return None
def sudoku(grid: Matrix) -> Matrix | None:
    """Solve the grid in place by backtracking; return it, or None if unsolvable."""
    if location := find_empty_location(grid):
        row, column = location
    else:
        # If the location is ``None``, then the grid is solved.
        return grid
    for digit in range(1, 10):
        if is_safe(grid, row, column, digit):
            grid[row][column] = digit
            if sudoku(grid) is not None:
                return grid
            grid[row][column] = 0
    return None
def print_solution(grid: Matrix) -> None:
    for row in grid:
        for cell in row:
            print(cell, end=" ")
        print()
if __name__ == "__main__":
# make a copy of grid so that you can compare with the unmodified grid
for example_grid in (initial_grid, no_solution):
print("\nExample grid:\n" + "=" * 20)
print_solution(example_grid)
print("\nExample grid solution:")
        solution = sudoku(example_grid)
if solution is not None:
print_solution(solution)
else:
print("Cannot find a solution.")
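# Caveat (illustrative addition): `sudoku` fills the grid in place, so solve a
# copy if the original clues are still needed afterwards:
#
#     import copy
#     solved = sudoku(copy.deepcopy(initial_grid))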
| 254
| 0
|
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
logger = logging.get_logger(__name__)
LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/layoutlmv3-base": "https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json",
}
class LayoutLMv3Config(PretrainedConfig):
    model_type = "layoutlmv3"
    def __init__(
        self, vocab_size=50265, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072,
        hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512,
        type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-5, pad_token_id=1, bos_token_id=0, eos_token_id=2,
        max_2d_position_embeddings=1024, coordinate_size=128, shape_size=128, has_relative_attention_bias=True,
        rel_pos_bins=32, max_rel_pos=128, rel_2d_pos_bins=64, max_rel_2d_pos=256, has_spatial_attention_bias=True,
        text_embed=True, visual_embed=True, input_size=224, num_channels=3, patch_size=16, classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            vocab_size=vocab_size, hidden_size=hidden_size, num_hidden_layers=num_hidden_layers,
            num_attention_heads=num_attention_heads, intermediate_size=intermediate_size, hidden_act=hidden_act,
            hidden_dropout_prob=hidden_dropout_prob, attention_probs_dropout_prob=attention_probs_dropout_prob,
            max_position_embeddings=max_position_embeddings, type_vocab_size=type_vocab_size,
            initializer_range=initializer_range, layer_norm_eps=layer_norm_eps, pad_token_id=pad_token_id,
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs,
        )
        self.max_2d_position_embeddings = max_2d_position_embeddings
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.has_relative_attention_bias = has_relative_attention_bias
        self.rel_pos_bins = rel_pos_bins
        self.max_rel_pos = max_rel_pos
        self.has_spatial_attention_bias = has_spatial_attention_bias
        self.rel_2d_pos_bins = rel_2d_pos_bins
        self.max_rel_2d_pos = max_rel_2d_pos
        self.text_embed = text_embed
        self.visual_embed = visual_embed
        self.input_size = input_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.classifier_dropout = classifier_dropout
class LayoutLMv3OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.12")
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
# The order of inputs is different for question answering and sequence classification
if self.task in ["question-answering", "sequence-classification"]:
return OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''sequence'''}),
('''bbox''', {0: '''batch''', 1: '''sequence'''}),
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
else:
return OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''sequence'''}),
('''bbox''', {0: '''batch''', 1: '''sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''sequence'''}),
('''pixel_values''', {0: '''batch''', 1: '''num_channels'''}),
] )
    @property
    def atol_for_validation(self) -> float:
        return 1e-5
    @property
    def default_onnx_opset(self) -> int:
        return 12
    def generate_dummy_inputs(self, processor, batch_size=-1, seq_length=-1, is_pair=False, framework=None, num_channels=3, image_width=40, image_height=40):
        setattr(processor.image_processor, "apply_ocr", False)
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0)
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = processor.tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add)
        # Generate dummy inputs according to compute batch and sequence
        dummy_text = [[" ".join([processor.tokenizer.unk_token]) * seq_length]] * batch_size
        # Generate dummy bounding boxes
        dummy_bboxes = [[[48, 84, 73, 128]]] * batch_size
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        # batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
        dummy_image = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
        inputs = dict(
            processor(
                dummy_image, text=dummy_text, boxes=dummy_bboxes, return_tensors=framework,
            ))
        return inputs
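# A minimal usage sketch (illustrative addition, relying only on the defaults above):
#
#     config = LayoutLMv3Config()
#     assert config.model_type == "layoutlmv3"
#     assert config.max_2d_position_embeddings == 1024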
| 704
|
import argparse
import fairseq
import torch
from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""encoder.layer_norm_for_extract""": """layer_norm_for_extract""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """lm_head""",
"""label_embs_concat""": """label_embeddings_concat""",
"""mask_emb""": """masked_spec_embed""",
"""spk_proj""": """speaker_proj""",
}
TOP_LEVEL_KEYS = [
"""lm_head""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
"""label_embeddings_concat""",
"""speaker_proj""",
"""layer_norm_for_extract""",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    """Walk `hf_pointer` along the dotted `key` and copy `value` into the target tensor."""
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
            f" {value.shape} for {full_name}")
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.')
def recursively_load_weights(fairseq_model, hf_model):
    """Map every fairseq weight onto the corresponding transformers parameter."""
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.unispeech_sat.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "unispeech_sat." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    if "layer_norm_for_extract" in name and (".".join(name.split(".")[:-1]) != key):
                        # special case since naming is very similar
                        continue
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)
    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """Copy a single convolutional feature-extractor weight into the HF model."""
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])
    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.")
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.")
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.")
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.")
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_unispeech_sat_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True):
    """Copy/paste/tweak the fairseq model's weights to the transformers design."""
    if config_path is not None:
        config = UniSpeechSatConfig.from_pretrained(config_path)
    else:
        config = UniSpeechSatConfig()
    dict_path = ""
    if is_finetuned:
        hf_wav2vec = UniSpeechSatForCTC(config)
    else:
        hf_wav2vec = UniSpeechSatForPreTraining(config)
    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])})
    model = model[0].eval()
    recursively_load_weights(model, hf_wav2vec)
    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
    args = parser.parse_args()
convert_unispeech_sat_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
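# Example invocation (illustrative; the script name and all paths are placeholders):
#
#     python convert_unispeech_sat_checkpoint.py \
#         --checkpoint_path ./unispeech_sat.pt \
#         --dict_path ./dict.ltr.txt \
#         --config_path ./config.json \
#         --pytorch_dump_folder_path ./unispeech-sat-hf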
| 531
| 0
|
import argparse
import json
from typing import List
from ltp import LTP
from transformers.models.bert.tokenization_bert import BertTokenizer
def _is_chinese_char(cp):
    """Check whether `cp` is the codepoint of a CJK character."""
    if (
        (cp >= 0x4E00 and cp <= 0x9FFF)
        or (cp >= 0x3400 and cp <= 0x4DBF)  #
        or (cp >= 0x20000 and cp <= 0x2A6DF)  #
        or (cp >= 0x2A700 and cp <= 0x2B73F)  #
        or (cp >= 0x2B740 and cp <= 0x2B81F)  #
        or (cp >= 0x2B820 and cp <= 0x2CEAF)  #
        or (cp >= 0xF900 and cp <= 0xFAFF)
        or (cp >= 0x2F800 and cp <= 0x2FA1F)  #
    ):  #
        return True
    return False
def is_chinese(word: str):
    # A word counts as Chinese only if every character in it is a CJK character.
    for char in word:
        cp = ord(char)
        if not _is_chinese_char(cp):
            return 0
    return 1
def get_chinese_word(tokens: List[str]):
    word_set = set()
    for token in tokens:
        chinese_word = len(token) > 1 and is_chinese(token)
        if chinese_word:
            word_set.add(token)
    word_list = list(word_set)
    return word_list
def add_sub_symbol(bert_tokens: List[str], chinese_word_set: set):
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w) for w in chinese_word_set])
    bert_word = bert_tokens
    start, end = 0, len(bert_word)
    while start < end:
        single_word = True
        if is_chinese(bert_word[start]):
            l = min(end - start, max_word_len)
            for i in range(l, 1, -1):
                whole_word = "".join(bert_word[start : start + i])
                if whole_word in chinese_word_set:
                    for j in range(start + 1, start + i):
                        bert_word[j] = "##" + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word
def prepare_ref(lines: List[str], ltp_tokenizer: LTP, bert_tokenizer: BertTokenizer):
    ltp_res = []
    for i in range(0, len(lines), 100):
        res = ltp_tokenizer.pipeline(lines[i : i + 100], tasks=["cws"]).cws
        res = [get_chinese_word(r) for r in res]
        ltp_res.extend(res)
    assert len(ltp_res) == len(lines)
    bert_res = []
    for i in range(0, len(lines), 100):
        res = bert_tokenizer(lines[i : i + 100], add_special_tokens=True, truncation=True, max_length=512)
        bert_res.extend(res["input_ids"])
    assert len(bert_res) == len(lines)
    ref_ids = []
    for input_ids, chinese_word in zip(bert_res, ltp_res):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id)
            input_tokens.append(token)
        input_tokens = add_sub_symbol(input_tokens, chinese_word)
        ref_id = []
        # We only save pos of chinese subwords start with ##, which mean is part of a whole word.
        for i, token in enumerate(input_tokens):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)):
                    ref_id.append(i)
        ref_ids.append(ref_id)
    assert len(ref_ids) == len(bert_res)
    return ref_ids
def main(args):
    with open(args.file_name, "r", encoding="utf-8") as f:
        data = f.readlines()
    data = [line.strip() for line in data if len(line) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    ltp_tokenizer = LTP(args.ltp)  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert)
    ref_ids = prepare_ref(data, ltp_tokenizer, bert_tokenizer)
    with open(args.save_path, "w", encoding="utf-8") as f:
        data = [json.dumps(ref) + "\n" for ref in ref_ids]
        f.writelines(data)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="prepare_chinese_ref")
parser.add_argument(
'--file_name',
required=False,
type=str,
default='./resources/chinese-demo.txt',
help='file need process, same as training data in lm',
)
parser.add_argument(
'--ltp',
required=False,
type=str,
default='./resources/ltp',
help='resources for LTP tokenizer, usually a path',
)
parser.add_argument(
'--bert',
required=False,
type=str,
default='./resources/robert',
help='resources for Bert tokenizer',
)
parser.add_argument(
'--save_path',
required=False,
type=str,
default='./resources/ref.txt',
help='path to save res',
)
    args = parser.parse_args()
main(args)
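# Example invocation (illustrative; paths mirror the defaults declared above):
#
#     python prepare_chinese_ref.py \
#         --file_name ./resources/chinese-demo.txt \
#         --ltp ./resources/ltp \
#         --bert ./resources/robert \
#         --save_path ./resources/ref.txt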
| 321
|
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ImageToTextPipeline(Pipeline):
    """Image-to-text pipeline using a vision-to-sequence model to predict a caption for an image."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == "tf" else MODEL_FOR_VISION_2_SEQ_MAPPING)
    def _sanitize_parameters(self, max_new_tokens=None, generate_kwargs=None, prompt=None):
        preprocess_params = {}
        forward_kwargs = {}
        if prompt is not None:
            preprocess_params["prompt"] = prompt
        if generate_kwargs is not None:
            forward_kwargs["generate_kwargs"] = generate_kwargs
        if max_new_tokens is not None:
            if "generate_kwargs" not in forward_kwargs:
                forward_kwargs["generate_kwargs"] = {}
            if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
                raise ValueError(
                    "'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter,"
                    " please use only one")
            forward_kwargs["generate_kwargs"]["max_new_tokens"] = max_new_tokens
        return preprocess_params, forward_kwargs, {}
    def __call__(self, images, **kwargs):
        return super().__call__(images, **kwargs)
    def preprocess(self, image, prompt=None):
        image = load_image(image)
        if prompt is not None:
            if not isinstance(prompt, str):
                raise ValueError(
                    f"Received an invalid text input, got - {type(prompt)} - but expected a single string. "
                    "Note also that one single text can be provided for conditional image to text generation.")
            model_type = self.model.config.model_type
            if model_type == "git":
                model_inputs = self.image_processor(images=image, return_tensors=self.framework)
                input_ids = self.tokenizer(text=prompt, add_special_tokens=False).input_ids
                input_ids = [self.tokenizer.cls_token_id] + input_ids
                input_ids = torch.tensor(input_ids).unsqueeze(0)
                model_inputs.update({"input_ids": input_ids})
            elif model_type == "pix2struct":
                model_inputs = self.image_processor(images=image, header_text=prompt, return_tensors=self.framework)
            elif model_type != "vision-encoder-decoder":
                # vision-encoder-decoder does not support conditional generation
                model_inputs = self.image_processor(images=image, return_tensors=self.framework)
                text_inputs = self.tokenizer(prompt, return_tensors=self.framework)
                model_inputs.update(text_inputs)
            else:
                raise ValueError(f"Model type {model_type} does not support conditional text generation")
        else:
            model_inputs = self.image_processor(images=image, return_tensors=self.framework)
        if self.model.config.model_type == "git" and prompt is None:
            model_inputs["input_ids"] = None
        return model_inputs
    def _forward(self, model_inputs, generate_kwargs=None):
        # Git model sets `model_inputs["input_ids"] = None` in `preprocess` (when `prompt=None`). In batch model, the
        # pipeline will group them into a list of `None`, which fail `_forward`. Avoid this by checking it first.
        if (
            "input_ids" in model_inputs
            and isinstance(model_inputs["input_ids"], list)
            and all(x is None for x in model_inputs["input_ids"])
        ):
            model_inputs["input_ids"] = None
        if generate_kwargs is None:
            generate_kwargs = {}
        # FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
        # parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
        # the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
        # in the `_prepare_model_inputs` method.
        inputs = model_inputs.pop(self.model.main_input_name)
        model_outputs = self.model.generate(inputs, **model_inputs, **generate_kwargs)
        return model_outputs
    def postprocess(self, model_outputs):
        records = []
        for output_ids in model_outputs:
            record = {
                "generated_text": self.tokenizer.decode(
                    output_ids, skip_special_tokens=True,
                )
            }
            records.append(record)
        return records
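# A minimal usage sketch (illustrative addition; the checkpoint id is an
# assumption, any image-to-text checkpoint works):
#
#     from transformers import pipeline
#     captioner = pipeline("image-to-text", model="nlpconnect/vit-gpt2-image-captioning")
#     print(captioner("photo.jpg"))  # [{"generated_text": "..."}]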
| 321
| 1
|
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from .config import config_command_parser
from .config_args import default_config_file, load_config_from_file # noqa: F401
from .default import default_command_parser
from .update import update_command_parser
def get_config_parser(subparsers=None):
    parent_parser = argparse.ArgumentParser(add_help=False, allow_abbrev=False)
    # The main config parser
    config_parser = config_command_parser(subparsers)
    # The subparser to add commands to
    subcommands = config_parser.add_subparsers(title="subcommands", dest="subcommand")
    # Then add other parsers with the parent parser
    default_command_parser(subcommands, parents=[parent_parser])
    update_command_parser(subcommands, parents=[parent_parser])
    return config_parser
def main():
    config_parser = get_config_parser()
    args = config_parser.parse_args()
    if not hasattr(args, "func"):
        config_parser.print_help()
        exit(1)
    # Run
    args.func(args)
if __name__ == "__main__":
main()
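# Example invocation (illustrative): this module backs the `accelerate config`
# command group registered above, e.g.
#
#     accelerate config           # interactive questionnaire
#     accelerate config default   # write a default config file
#     accelerate config update    # update an existing config file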
| 715
|
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
class SafeDiffusionPipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)
        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image
    @property
    def dummy_cond_unet(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, )
        return model
    @property
    def dummy_vae(self):
        torch.manual_seed(0)
        model = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, )
        return model
    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, )
        return CLIPTextModel(config)
    @property
    def dummy_extractor(self):
        def extract(*args, **kwargs):
            class Out:
                def __init__(self):
                    self.pixel_values = torch.ones([0])
                def to(self, device):
                    self.pixel_values.to(device)
                    return self
            return Out()
        return extract
    def test_safe_diffusion_ddim(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, )
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionPipeline(
            unet=unet, scheduler=scheduler, vae=vae, text_encoder=bert, tokenizer=tokenizer, safety_checker=None, feature_extractor=self.dummy_extractor, )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        prompt = "A painting of a squirrel eating a burger"
        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np")
        image = output.images
        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = sd_pipe(
            [prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np", return_dict=False, )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.5756, 0.6118, 0.5005, 0.5041, 0.5471, 0.4726, 0.4976, 0.4865, 0.4864])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
    def test_safe_diffusion_pndm(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionPipeline(
            unet=unet, scheduler=scheduler, vae=vae, text_encoder=bert, tokenizer=tokenizer, safety_checker=None, feature_extractor=self.dummy_extractor, )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        prompt = "A painting of a squirrel eating a burger"
        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np")
        image = output.images
        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = sd_pipe(
            [prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np", return_dict=False, )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.5125, 0.5716, 0.4828, 0.5060, 0.5650, 0.4768, 0.5185, 0.4895, 0.4993])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
    def test_stable_diffusion_no_safety_checker(self):
        pipe = StableDiffusionPipeline.from_pretrained(
            "hf-internal-testing/tiny-stable-diffusion-lms-pipe", safety_checker=None)
        assert isinstance(pipe, StableDiffusionPipeline)
        assert isinstance(pipe.scheduler, LMSDiscreteScheduler)
        assert pipe.safety_checker is None
        image = pipe("example prompt", num_inference_steps=2).images[0]
        assert image is not None
        # check that there's no error when saving a pipeline with one of the models being None
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = StableDiffusionPipeline.from_pretrained(tmpdirname)
        # sanity check that the pipeline still works
        assert pipe.safety_checker is None
        image = pipe("example prompt", num_inference_steps=2).images[0]
        assert image is not None
    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_fp16(self):
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        # put models in fp16
        unet = unet.half()
        vae = vae.half()
        bert = bert.half()
        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionPipeline(
            unet=unet, scheduler=scheduler, vae=vae, text_encoder=bert, tokenizer=tokenizer, safety_checker=None, feature_extractor=self.dummy_extractor, )
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)
        prompt = "A painting of a squirrel eating a burger"
        image = sd_pipe([prompt], num_inference_steps=2, output_type="np").images
        assert image.shape == (1, 64, 64, 3)
@nightly
@require_torch_gpu
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
__a = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" , safety_checker=__lowercase )
__a = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
__a = sd_pipe.to(__lowercase )
sd_pipe.set_progress_bar_config(disable=__lowercase )
__a = (
"""portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle"""
""" coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with"""
""" anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and"""
""" children from bahnhof zoo, detailed """
)
__a = 4003660346
__a = 7
# without safety guidance (sld_guidance_scale = 0)
__a = torch.manual_seed(__lowercase )
__a = sd_pipe(
[prompt] , generator=__lowercase , guidance_scale=__lowercase , num_inference_steps=50 , output_type="""np""" , width=512 , height=512 , sld_guidance_scale=0 , )
__a = output.images
__a = image[0, -3:, -3:, -1]
__a = [0.2278, 0.2231, 0.2249, 0.2333, 0.2303, 0.1885, 0.2273, 0.2144, 0.2176]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
# with safety guidance (strong configuration)
__a = torch.manual_seed(__lowercase )
__a = sd_pipe(
[prompt] , generator=__lowercase , guidance_scale=__lowercase , num_inference_steps=50 , output_type="""np""" , width=512 , height=512 , sld_guidance_scale=2000 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
__a = output.images
__a = image[0, -3:, -3:, -1]
__a = [0.2383, 0.2276, 0.236, 0.2192, 0.2186, 0.2053, 0.1971, 0.1901, 0.1719]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
__a = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" , safety_checker=__lowercase )
__a = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
__a = sd_pipe.to(__lowercase )
sd_pipe.set_progress_bar_config(disable=__lowercase )
__a = """padme amidala taking a bath artwork, safe for work, no nudity"""
__a = 2734971755
__a = 7
__a = torch.manual_seed(__lowercase )
__a = sd_pipe(
[prompt] , generator=__lowercase , guidance_scale=__lowercase , num_inference_steps=50 , output_type="""np""" , width=512 , height=512 , sld_guidance_scale=0 , )
__a = output.images
__a = image[0, -3:, -3:, -1]
__a = [0.3502, 0.3622, 0.3396, 0.3642, 0.3478, 0.3318, 0.35, 0.3348, 0.3297]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
__a = torch.manual_seed(__lowercase )
__a = sd_pipe(
[prompt] , generator=__lowercase , guidance_scale=__lowercase , num_inference_steps=50 , output_type="""np""" , width=512 , height=512 , sld_guidance_scale=2000 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
__a = output.images
__a = image[0, -3:, -3:, -1]
__a = [0.5531, 0.5206, 0.4895, 0.5156, 0.5182, 0.4751, 0.4802, 0.4803, 0.4443]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
__a = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" )
__a = sd_pipe.to(__lowercase )
sd_pipe.set_progress_bar_config(disable=__lowercase )
__a = (
"""the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c."""
""" leyendecker"""
)
__a = 1044355234
__a = 12
__a = torch.manual_seed(__lowercase )
__a = sd_pipe(
[prompt] , generator=__lowercase , guidance_scale=__lowercase , num_inference_steps=50 , output_type="""np""" , width=512 , height=512 , sld_guidance_scale=0 , )
__a = output.images
__a = image[0, -3:, -3:, -1]
__a = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] )
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-7
__a = torch.manual_seed(__lowercase )
__a = sd_pipe(
[prompt] , generator=__lowercase , guidance_scale=__lowercase , num_inference_steps=50 , output_type="""np""" , width=512 , height=512 , sld_guidance_scale=2000 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
__a = output.images
__a = image[0, -3:, -3:, -1]
__a = np.array([0.5818, 0.6285, 0.6835, 0.6019, 0.625, 0.6754, 0.6096, 0.6334, 0.6561] )
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
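# The nightly tests above each run the same prompt twice: sld_guidance_scale=0
# disables safe latent diffusion entirely, while the strong configuration
# (sld_guidance_scale=2000 plus warmup/threshold/momentum settings) steers sampling
# away from unsafe content, which is why each prompt has two different expected slices.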
| 547
| 0
|
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
lowerCamelCase : List[str] = logging.get_logger(__name__)
class A__ ( A__ ):
def __init__( self : Union[str, Any] , *_a : Union[str, Any] , **_a : Optional[Any] ) -> None:
'''simple docstring'''
warnings.warn(
'The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
' Please use MobileViTImageProcessor instead.' , FutureWarning , )
super().__init__(*_a , **_a )
| 405
|
'''simple docstring'''
def _lowerCAmelCase ( ) -> int:
"""simple docstring"""
return [
a * b * (10_00 - a - b)
for a in range(1 , 9_99 )
for b in range(a , 9_99 )
if (a * a + b * b == (10_00 - a - b) ** 2)
][0]
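# Sanity check: the only Pythagorean triplet with a + b + c = 1000 is
# (200, 375, 425), since 200**2 + 375**2 == 425**2, so solution() evaluates to
# 200 * 375 * 425 == 31875000.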
if __name__ == "__main__":
print(f'''{solution() = }''')
| 405
| 1
|
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_realm import RealmTokenizer
__UpperCAmelCase : Any = logging.get_logger(__name__)
__UpperCAmelCase : Any = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
__UpperCAmelCase : List[Any] = {
"vocab_file": {
"google/realm-cc-news-pretrained-embedder": (
"https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/vocab.txt"
),
"google/realm-cc-news-pretrained-encoder": (
"https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/vocab.txt"
),
"google/realm-cc-news-pretrained-scorer": (
"https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/vocab.txt"
),
"google/realm-cc-news-pretrained-openqa": (
"https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/vocab.txt"
),
"google/realm-orqa-nq-openqa": "https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/vocab.txt",
"google/realm-orqa-nq-reader": "https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/vocab.txt",
"google/realm-orqa-wq-openqa": "https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/vocab.txt",
"google/realm-orqa-wq-reader": "https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/vocab.txt",
},
"tokenizer_file": {
"google/realm-cc-news-pretrained-embedder": (
"https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/tokenizer.jsont"
),
"google/realm-cc-news-pretrained-encoder": (
"https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/tokenizer.json"
),
"google/realm-cc-news-pretrained-scorer": (
"https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/tokenizer.json"
),
"google/realm-cc-news-pretrained-openqa": (
"https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/tokenizer.json"
),
"google/realm-orqa-nq-openqa": (
"https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/tokenizer.json"
),
"google/realm-orqa-nq-reader": (
"https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/tokenizer.json"
),
"google/realm-orqa-wq-openqa": (
"https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/tokenizer.json"
),
"google/realm-orqa-wq-reader": (
"https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/tokenizer.json"
),
},
}
__UpperCAmelCase : List[Any] = {
"google/realm-cc-news-pretrained-embedder": 512,
"google/realm-cc-news-pretrained-encoder": 512,
"google/realm-cc-news-pretrained-scorer": 512,
"google/realm-cc-news-pretrained-openqa": 512,
"google/realm-orqa-nq-openqa": 512,
"google/realm-orqa-nq-reader": 512,
"google/realm-orqa-wq-openqa": 512,
"google/realm-orqa-wq-reader": 512,
}
__UpperCAmelCase : Any = {
"google/realm-cc-news-pretrained-embedder": {"do_lower_case": True},
"google/realm-cc-news-pretrained-encoder": {"do_lower_case": True},
"google/realm-cc-news-pretrained-scorer": {"do_lower_case": True},
"google/realm-cc-news-pretrained-openqa": {"do_lower_case": True},
"google/realm-orqa-nq-openqa": {"do_lower_case": True},
"google/realm-orqa-nq-reader": {"do_lower_case": True},
"google/realm-orqa-wq-openqa": {"do_lower_case": True},
"google/realm-orqa-wq-reader": {"do_lower_case": True},
}
class __snake_case ( __lowerCamelCase ):
'''simple docstring'''
lowerCAmelCase__ = VOCAB_FILES_NAMES
lowerCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase__ = PRETRAINED_INIT_CONFIGURATION
lowerCAmelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase__ = RealmTokenizer
def __init__( self : int , A : Tuple=None , A : List[Any]=None , A : Tuple=True , A : Union[str, Any]="[UNK]" , A : Optional[Any]="[SEP]" , A : str="[PAD]" , A : List[str]="[CLS]" , A : List[Any]="[MASK]" , A : Optional[Any]=True , A : List[Any]=None , **A : Union[str, Any] , ):
super().__init__(
A , tokenizer_file=A , do_lower_case=A , unk_token=A , sep_token=A , pad_token=A , cls_token=A , mask_token=A , tokenize_chinese_chars=A , strip_accents=A , **A , )
__snake_case: int = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("""lowercase""" , A ) != do_lower_case
or normalizer_state.get("""strip_accents""" , A ) != strip_accents
or normalizer_state.get("""handle_chinese_chars""" , A ) != tokenize_chinese_chars
):
__snake_case: List[Any] = getattr(A , normalizer_state.pop("""type""" ) )
__snake_case: List[str] = do_lower_case
__snake_case: Optional[int] = strip_accents
__snake_case: Union[str, Any] = tokenize_chinese_chars
__snake_case: Optional[int] = normalizer_class(**A )
__snake_case: Any = do_lower_case
def UpperCAmelCase__ ( self : Dict , A : List[Any] , **A : Union[str, Any] ):
__snake_case: Dict = PaddingStrategy.MAX_LENGTH
__snake_case: Tuple = text
__snake_case: Optional[int] = kwargs.pop("""text_pair""" , A )
__snake_case: Optional[int] = kwargs.pop("""return_tensors""" , A )
__snake_case: Dict = {
"""input_ids""": [],
"""attention_mask""": [],
"""token_type_ids""": [],
}
for idx, candidate_text in enumerate(A ):
if batch_text_pair is not None:
__snake_case: Union[str, Any] = batch_text_pair[idx]
else:
__snake_case: Optional[Any] = None
__snake_case: Union[str, Any] = super().__call__(A , A , return_tensors=A , **A )
__snake_case: str = encoded_candidates.get("""input_ids""" )
__snake_case: Dict = encoded_candidates.get("""attention_mask""" )
__snake_case: Dict = encoded_candidates.get("""token_type_ids""" )
if encoded_input_ids is not None:
output_data["input_ids"].append(A )
if encoded_attention_mask is not None:
output_data["attention_mask"].append(A )
if encoded_token_type_ids is not None:
output_data["token_type_ids"].append(A )
__snake_case: int = {key: item for key, item in output_data.items() if len(A ) != 0}
return BatchEncoding(A , tensor_type=A )
def UpperCAmelCase__ ( self : Union[str, Any] , A : Optional[int] , A : Dict=None ):
__snake_case: Union[str, Any] = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def UpperCAmelCase__ ( self : List[Any] , A : List[int] , A : Optional[List[int]] = None ):
__snake_case: Optional[Any] = [self.sep_token_id]
__snake_case: List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def UpperCAmelCase__ ( self : List[Any] , A : str , A : Optional[str] = None ):
__snake_case: Union[str, Any] = self._tokenizer.model.save(A , name=A )
return tuple(A )
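# Hedged usage sketch (method names above are obfuscated; on the upstream tokenizer
# the batched-candidate entry point is `batch_encode_candidates`). Padding is forced
# to MAX_LENGTH so every candidate set yields rectangular tensors, e.g.:
#   tokenizer = RealmTokenizerFast.from_pretrained("google/realm-cc-news-pretrained-scorer")
#   batch = tokenizer.batch_encode_candidates(
#       [["candidate one", "candidate two"]], max_length=10, return_tensors="pt")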
| 155
|
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.ta.modeling_ta import TaBlock, TaConfig, TaLayerNorm
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class __snake_case ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ):
'''simple docstring'''
@register_to_config
def __init__( self : int , A : int , A : int , A : int , A : float , A : int , A : int , A : int , A : int , A : str , A : bool = False , ):
super().__init__()
__snake_case: List[str] = nn.Embedding(A , A )
__snake_case: Optional[Any] = nn.Embedding(A , A )
__snake_case: Optional[Any] = False
__snake_case: Optional[Any] = nn.Dropout(p=A )
__snake_case: Optional[Any] = TaConfig(
vocab_size=A , d_model=A , num_heads=A , d_kv=A , d_ff=A , dropout_rate=A , feed_forward_proj=A , is_decoder=A , is_encoder_decoder=A , )
__snake_case: Union[str, Any] = nn.ModuleList()
for lyr_num in range(A ):
__snake_case: Optional[int] = TaBlock(A )
self.encoders.append(A )
__snake_case: Optional[int] = TaLayerNorm(A )
__snake_case: List[Any] = nn.Dropout(p=A )
def UpperCAmelCase__ ( self : Union[str, Any] , A : int , A : Any ):
__snake_case: Tuple = self.token_embedder(A )
__snake_case: Union[str, Any] = encoder_input_tokens.shape[1]
__snake_case: Dict = torch.arange(A , device=encoder_input_tokens.device )
x += self.position_encoding(A )
__snake_case: Optional[Any] = self.dropout_pre(A )
# invert the attention mask
__snake_case: Dict = encoder_input_tokens.size()
__snake_case: str = self.get_extended_attention_mask(A , A )
for lyr in self.encoders:
__snake_case: List[str] = lyr(A , A )[0]
__snake_case: Any = self.layer_norm(A )
return self.dropout_post(A ), encoder_inputs_mask
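# Shape sketch of the forward pass above: token ids (batch, seq) are embedded to
# (batch, seq, d_model), learned position embeddings are added, and each TaBlock
# applies self-attention + feed-forward under the extended (additive, inverted)
# attention mask before the final layer norm and dropout.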
| 155
| 1
|
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
from transformers import AutoTokenizer, FlaxMTaForConditionalGeneration
from transformers.models.ta.modeling_flax_ta import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class lowerCAmelCase_ ( unittest.TestCase ):
@slow
def __snake_case ( self : Optional[Any] ):
lowerCAmelCase__ = FlaxMTaForConditionalGeneration.from_pretrained('''google/mt5-small''' )
lowerCAmelCase__ = AutoTokenizer.from_pretrained('''google/mt5-small''' )
lowerCAmelCase__ = tokenizer('''Hello there''' , return_tensors='''np''' ).input_ids
lowerCAmelCase__ = tokenizer('''Hi I am''' , return_tensors='''np''' ).input_ids
lowerCAmelCase__ = shift_tokens_right(SCREAMING_SNAKE_CASE_ , model.config.pad_token_id , model.config.decoder_start_token_id )
lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE_ , decoder_input_ids=SCREAMING_SNAKE_CASE_ ).logits
lowerCAmelCase__ = optax.softmax_cross_entropy(SCREAMING_SNAKE_CASE_ , onehot(SCREAMING_SNAKE_CASE_ , logits.shape[-1] ) ).mean()
lowerCAmelCase__ = -(labels.shape[-1] * loss.item())
lowerCAmelCase__ = -84.9_127
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1e-4 )
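# Note on the expected score: optax.softmax_cross_entropy(...).mean() is a per-token
# average, so multiplying by labels.shape[-1] recovers the summed sequence loss,
# and the leading minus sign turns it into a log-likelihood.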
| 668
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_snake_case : Union[str, Any] = {
"configuration_owlvit": [
"OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"OwlViTConfig",
"OwlViTOnnxConfig",
"OwlViTTextConfig",
"OwlViTVisionConfig",
],
"processing_owlvit": ["OwlViTProcessor"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case : List[Any] = ["OwlViTFeatureExtractor"]
_snake_case : Optional[int] = ["OwlViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case : int = [
"OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"OwlViTModel",
"OwlViTPreTrainedModel",
"OwlViTTextModel",
"OwlViTVisionModel",
"OwlViTForObjectDetection",
]
if TYPE_CHECKING:
from .configuration_owlvit import (
OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OwlViTConfig,
OwlViTOnnxConfig,
OwlViTTextConfig,
OwlViTVisionConfig,
)
from .processing_owlvit import OwlViTProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_owlvit import OwlViTFeatureExtractor
from .image_processing_owlvit import OwlViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_owlvit import (
OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
OwlViTForObjectDetection,
OwlViTModel,
OwlViTPreTrainedModel,
OwlViTTextModel,
OwlViTVisionModel,
)
else:
import sys
_snake_case : Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
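# Note: at runtime _LazyModule defers the torch/vision imports declared in
# _import_structure until an attribute is first accessed, while the TYPE_CHECKING
# branch above gives static type checkers the same names eagerly.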
| 81
| 0
|
"""simple docstring"""
def A( snake_case_ ):
"""simple docstring"""
if not isinstance(snake_case_ , snake_case_ ):
raise ValueError("check_bouncy() accepts only integer arguments" )
lowercase__: Union[str, Any] = str(snake_case_ )
lowercase__: Optional[int] = "".join(sorted(snake_case_ ) )
return sorted_str_n != str_n and sorted_str_n[::-1] != str_n
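# Worked examples: check_bouncy(123) is False ("123" is already sorted),
# check_bouncy(321) is False (it equals the reverse-sorted digits), and
# check_bouncy(101) is True (neither "011" nor "110" matches "101").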
def A( snake_case_ = 99 ):
"""simple docstring"""
if not 0 < percent < 100:
raise ValueError("solution() only accepts values from 0 to 100" )
lowercase__: str = 0
lowercase__: List[Any] = 1
while True:
if check_bouncy(snake_case_ ):
bouncy_num += 1
if (bouncy_num / num) * 100 >= percent:
return num
num += 1
if __name__ == "__main__":
from doctest import testmod
testmod()
print(F"{solution(99)}")
| 120
|
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class _a ( metaclass=lowercase_ ):
'''simple docstring'''
UpperCamelCase__ = ["""torch""", """scipy"""]
def __init__( self , *UpperCAmelCase_ , **UpperCAmelCase_) -> Union[str, Any]:
'''simple docstring'''
requires_backends(self , ["torch", "scipy"])
@classmethod
def __lowercase ( cls , *UpperCAmelCase_ , **UpperCAmelCase_) -> Union[str, Any]:
'''simple docstring'''
requires_backends(cls , ["torch", "scipy"])
@classmethod
def __lowercase ( cls , *UpperCAmelCase_ , **UpperCAmelCase_) -> Union[str, Any]:
'''simple docstring'''
requires_backends(cls , ["torch", "scipy"])
| 120
| 1
|
"""simple docstring"""
import os
from glob import glob
import imageio
import torch
import torchvision
import wandb
from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan
from loaders import load_vqgan
from PIL import Image
from torch import nn
from transformers import CLIPModel, CLIPTokenizerFast
from utils import get_device, get_timestamp, show_pil
class _UpperCAmelCase :
def __init__( self : Union[str, Any] , lowercase_ : str = "cpu" , lowercase_ : str = "openai/clip-vit-large-patch14" ):
snake_case_ : List[Any] = device
snake_case_ : Dict = CLIPTokenizerFast.from_pretrained(lowercase_ )
snake_case_ : Optional[Any] = [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73]
snake_case_ : Tuple = [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11]
snake_case_ : int = torchvision.transforms.Normalize(self.image_mean , self.image_std )
snake_case_ : List[str] = torchvision.transforms.Resize(224 )
snake_case_ : List[Any] = torchvision.transforms.CenterCrop(224 )
def _snake_case ( self : List[Any] , lowercase_ : Any ):
snake_case_ : Union[str, Any] = self.resize(lowercase_ )
snake_case_ : Optional[Any] = self.center_crop(lowercase_ )
snake_case_ : Tuple = self.normalize(lowercase_ )
return images
def __call__( self : Dict , lowercase_ : List[Any]=None , lowercase_ : Dict=None , **lowercase_ : Any ):
snake_case_ : Tuple = self.tokenizer(text=lowercase_ , **lowercase_ )
snake_case_ : str = self.preprocess_img(lowercase_ )
snake_case_ : Union[str, Any] = {key: value.to(self.device ) for (key, value) in encoding.items()}
return encoding
class _UpperCAmelCase ( nn.Module):
def __init__( self : List[str] , lowercase_ : Optional[int]=10 , lowercase_ : Tuple=0.01 , lowercase_ : Any=None , lowercase_ : int=None , lowercase_ : str=None , lowercase_ : Optional[int]=None , lowercase_ : Union[str, Any]=None , lowercase_ : Dict=None , lowercase_ : List[str]=False , lowercase_ : Optional[Any]=True , lowercase_ : int="image" , lowercase_ : Dict=True , lowercase_ : Any=False , lowercase_ : Optional[Any]=False , lowercase_ : Tuple=False , ):
super().__init__()
snake_case_ : Optional[Any] = None
snake_case_ : List[str] = device if device else get_device()
if vqgan:
snake_case_ : List[str] = vqgan
else:
snake_case_ : Optional[Any] = load_vqgan(self.device , conf_path=lowercase_ , ckpt_path=lowercase_ )
self.vqgan.eval()
if clip:
snake_case_ : Tuple = clip
else:
snake_case_ : List[str] = CLIPModel.from_pretrained('''openai/clip-vit-base-patch32''' )
self.clip.to(self.device )
snake_case_ : Dict = ProcessorGradientFlow(device=self.device )
snake_case_ : int = iterations
snake_case_ : int = lr
snake_case_ : Optional[Any] = log
snake_case_ : int = make_grid
snake_case_ : Optional[Any] = return_val
snake_case_ : Optional[int] = quantize
snake_case_ : int = self.vqgan.decoder.z_shape
def _snake_case ( self : Optional[int] , lowercase_ : Any=None , lowercase_ : str=None , lowercase_ : Any=5 , lowercase_ : Tuple=True ):
snake_case_ : Any = []
if output_path is None:
snake_case_ : Union[str, Any] = '''./animation.gif'''
if input_path is None:
snake_case_ : Optional[int] = self.save_path
snake_case_ : List[str] = sorted(glob(input_path + '''/*''' ) )
if not len(lowercase_ ):
raise ValueError(
'''No images found in save path, aborting (did you pass save_intermediate=True to the generate'''
''' function?)''' )
if len(lowercase_ ) == 1:
print('''Only one image found in save path, (did you pass save_intermediate=True to the generate function?)''' )
snake_case_ : Optional[int] = total_duration / len(lowercase_ )
snake_case_ : Tuple = [frame_duration] * len(lowercase_ )
if extend_frames:
snake_case_ : Dict = 1.5
snake_case_ : str = 3
for file_name in paths:
if file_name.endswith('''.png''' ):
images.append(imageio.imread(lowercase_ ) )
imageio.mimsave(lowercase_ , lowercase_ , duration=lowercase_ )
print(f"gif saved to {output_path}" )
def _snake_case ( self : int , lowercase_ : Optional[Any]=None , lowercase_ : str=None ):
if not (path or img):
raise ValueError('''Input either path or tensor''' )
if img is not None:
raise NotImplementedError
snake_case_ : Any = preprocess(Image.open(lowercase_ ) , target_image_size=256 ).to(self.device )
snake_case_ : Optional[int] = preprocess_vqgan(lowercase_ )
snake_case_, *snake_case_ : Dict = self.vqgan.encode(lowercase_ )
return z
def _snake_case ( self : Union[str, Any] , lowercase_ : Tuple ):
snake_case_ : str = self.latent.detach().requires_grad_()
snake_case_ : Any = base_latent + transform_vector
if self.quantize:
snake_case_, *snake_case_ : Any = self.vqgan.quantize(lowercase_ )
else:
snake_case_ : Dict = trans_latent
return self.vqgan.decode(lowercase_ )
def _snake_case ( self : Tuple , lowercase_ : Dict , lowercase_ : List[str] , lowercase_ : List[str]=None ):
snake_case_ : Optional[Any] = self.clip_preprocessor(text=lowercase_ , images=lowercase_ , return_tensors='''pt''' , padding=lowercase_ )
snake_case_ : Optional[int] = self.clip(**lowercase_ )
snake_case_ : List[str] = clip_outputs.logits_per_image
if weights is not None:
snake_case_ : Tuple = similarity_logits * weights
return similarity_logits.sum()
def _snake_case ( self : List[Any] , lowercase_ : Any , lowercase_ : Union[str, Any] , lowercase_ : Optional[Any] ):
snake_case_ : Tuple = self._get_clip_similarity(pos_prompts['''prompts'''] , lowercase_ , weights=(1 / pos_prompts['''weights''']) )
if neg_prompts:
snake_case_ : Union[str, Any] = self._get_clip_similarity(neg_prompts['''prompts'''] , lowercase_ , weights=neg_prompts['''weights'''] )
else:
snake_case_ : Optional[int] = torch.tensor([1] , device=self.device )
snake_case_ : Any = -torch.log(lowercase_ ) + torch.log(lowercase_ )
return loss
def _snake_case ( self : int , lowercase_ : Optional[Any] , lowercase_ : Optional[Any] , lowercase_ : Dict ):
snake_case_ : int = torch.randn_like(self.latent , requires_grad=lowercase_ , device=self.device )
snake_case_ : Any = torch.optim.Adam([vector] , lr=self.lr )
for i in range(self.iterations ):
optim.zero_grad()
snake_case_ : str = self._add_vector(lowercase_ )
snake_case_ : Optional[Any] = loop_post_process(lowercase_ )
snake_case_ : str = self._get_CLIP_loss(lowercase_ , lowercase_ , lowercase_ )
print('''CLIP loss''' , lowercase_ )
if self.log:
wandb.log({'''CLIP Loss''': clip_loss} )
clip_loss.backward(retain_graph=lowercase_ )
optim.step()
if self.return_val == "image":
yield custom_to_pil(transformed_img[0] )
else:
yield vector
def _snake_case ( self : Optional[int] , lowercase_ : Tuple , lowercase_ : str , lowercase_ : Optional[Any] ):
wandb.init(reinit=lowercase_ , project='''face-editor''' )
wandb.config.update({'''Positive Prompts''': positive_prompts} )
wandb.config.update({'''Negative Prompts''': negative_prompts} )
wandb.config.update({'''lr''': self.lr, '''iterations''': self.iterations} )
if image_path:
snake_case_ : Tuple = Image.open(lowercase_ )
snake_case_ : Optional[int] = image.resize((256, 256) )
wandb.log({'''Original Image''': wandb.Image(lowercase_ )} )
def _snake_case ( self : Tuple , lowercase_ : Dict ):
if not prompts:
return []
snake_case_ : List[Any] = []
snake_case_ : Union[str, Any] = []
if isinstance(lowercase_ , lowercase_ ):
snake_case_ : Optional[Any] = [prompt.strip() for prompt in prompts.split('''|''' )]
for prompt in prompts:
if isinstance(lowercase_ , (tuple, list) ):
snake_case_ : List[str] = prompt[0]
snake_case_ : Dict = float(prompt[1] )
elif ":" in prompt:
snake_case_, snake_case_ : Any = prompt.split(''':''' )
snake_case_ : Union[str, Any] = float(lowercase_ )
else:
snake_case_ : Tuple = prompt
snake_case_ : Union[str, Any] = 1.0
processed_prompts.append(lowercase_ )
weights.append(lowercase_ )
return {
"prompts": processed_prompts,
"weights": torch.tensor(lowercase_ , device=self.device ),
}
def _snake_case ( self : Optional[Any] , lowercase_ : Any , lowercase_ : Tuple=None , lowercase_ : Tuple=None , lowercase_ : Any=True , lowercase_ : List[str]=False , lowercase_ : List[str]=True , lowercase_ : str=True , lowercase_ : Union[str, Any]=None , ):
if image_path:
snake_case_ : str = self._get_latent(lowercase_ )
else:
snake_case_ : Union[str, Any] = torch.randn(self.latent_dim , device=self.device )
if self.log:
self._init_logging(lowercase_ , lowercase_ , lowercase_ )
assert pos_prompts, "You must provide at least one positive prompt."
snake_case_ : Union[str, Any] = self.process_prompts(lowercase_ )
snake_case_ : Any = self.process_prompts(lowercase_ )
if save_final and save_path is None:
snake_case_ : str = os.path.join('''./outputs/''' , '''_'''.join(pos_prompts['''prompts'''] ) )
if not os.path.exists(lowercase_ ):
os.makedirs(lowercase_ )
else:
snake_case_ : Optional[int] = save_path + '''_''' + get_timestamp()
os.makedirs(lowercase_ )
snake_case_ : List[str] = save_path
snake_case_ : int = self.vqgan.decode(self.latent )[0]
if show_intermediate:
print('''Original Image''' )
show_pil(custom_to_pil(lowercase_ ) )
snake_case_ : Union[str, Any] = loop_post_process(lowercase_ )
for iter, transformed_img in enumerate(self._optimize_CLIP(lowercase_ , lowercase_ , lowercase_ ) ):
if show_intermediate:
show_pil(lowercase_ )
if save_intermediate:
transformed_img.save(os.path.join(self.save_path , f"iter_{iter:03d}.png" ) )
if self.log:
wandb.log({'''Image''': wandb.Image(lowercase_ )} )
if show_final:
show_pil(lowercase_ )
if save_final:
transformed_img.save(os.path.join(self.save_path , f"iter_{iter:03d}_final.png" ) )
| 123
|
"""simple docstring"""
from __future__ import annotations
def __lowercase ( _a , _a , _a , ):
if (electron_conc, hole_conc, intrinsic_conc).count(0 ) != 1:
raise ValueError('''You cannot supply more or less than 2 values''' )
elif electron_conc < 0:
raise ValueError('''Electron concentration cannot be negative in a semiconductor''' )
elif hole_conc < 0:
raise ValueError('''Hole concentration cannot be negative in a semiconductor''' )
elif intrinsic_conc < 0:
raise ValueError(
'''Intrinsic concentration cannot be negative in a semiconductor''' )
elif electron_conc == 0:
return (
"electron_conc",
intrinsic_conc**2 / hole_conc,
)
elif hole_conc == 0:
return (
"hole_conc",
intrinsic_conc**2 / electron_conc,
)
elif intrinsic_conc == 0:
return (
"intrinsic_conc",
(electron_conc * hole_conc) ** 0.5,
)
else:
return (-1, -1)
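# Worked example: with hole_conc=100 and intrinsic_conc=50 (electron_conc passed
# as 0, i.e. unknown), the mass-action law n * p = n_i**2 gives
# electron_conc = 50**2 / 100 = 25.0, so the function returns ("electron_conc", 25.0).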
if __name__ == "__main__":
import doctest
doctest.testmod()
| 123
| 1
|
'''simple docstring'''
def __lowercase (_lowercase = 100 ) -> int:
"""simple docstring"""
__lowerCamelCase : Any = (n * (n + 1) // 2) ** 2
__lowerCamelCase : Optional[int] = n * (n + 1) * (2 * n + 1) // 6
return sum_cubes - sum_squares
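# Worked check for n = 10: (1 + ... + 10)**2 = 55**2 = 3025 and
# 1**2 + ... + 10**2 = 385, so the difference is 3025 - 385 = 2640.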
if __name__ == "__main__":
print(f'''{solution() = }''')
| 483
|
'''simple docstring'''
from __future__ import annotations
import typing
from collections.abc import Iterable
import numpy as np
UpperCAmelCase__ :List[str] = typing.Union[Iterable[float], Iterable[int], np.ndarray] # noqa: UP007
UpperCAmelCase__ :Optional[int] = typing.Union[np.floataa, int, float] # noqa: UP007
def __lowercase (_lowercase, _lowercase ) -> VectorOut:
"""simple docstring"""
return np.sqrt(np.sum((np.asarray(_lowercase ) - np.asarray(_lowercase )) ** 2 ) )
def __lowercase (_lowercase, _lowercase ) -> VectorOut:
"""simple docstring"""
return sum((va - vb) ** 2 for va, vb in zip(_lowercase, _lowercase ) ) ** (1 / 2)
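# Both functions compute the same Euclidean distance; e.g. for [1, 2, 3] and
# [4, 5, 6] the squared differences sum to 9 + 9 + 9 = 27, giving sqrt(27) ≈ 5.196.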
if __name__ == "__main__":
def __lowercase () -> None:
"""simple docstring"""
from timeit import timeit
print("""Without Numpy""" )
print(
timeit(
"""euclidean_distance_no_np([1, 2, 3], [4, 5, 6])""", number=10_000, globals=globals(), ) )
print("""With Numpy""" )
print(
timeit(
"""euclidean_distance([1, 2, 3], [4, 5, 6])""", number=10_000, globals=globals(), ) )
benchmark()
| 483
| 1
|
def snake_case__ ( SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int ):
'''simple docstring'''
if a < 0 or b < 0:
raise ValueError('the value of both inputs must be positive' )
lowercase__ : str = str(bin(SCREAMING_SNAKE_CASE_ ) )[2:] # remove the leading "0b"
lowercase__ : Optional[Any] = str(bin(SCREAMING_SNAKE_CASE_ ) )[2:]
lowercase__ : Optional[Any] = max(len(SCREAMING_SNAKE_CASE_ ) , len(SCREAMING_SNAKE_CASE_ ) )
return "0b" + "".join(
str(int('1' in (char_a, char_b) ) )
for char_a, char_b in zip(a_binary.zfill(SCREAMING_SNAKE_CASE_ ) , b_binary.zfill(SCREAMING_SNAKE_CASE_ ) ) )
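# Example: for a=25 (0b11001) and b=32 (0b100000), zero-filling both to six digits
# and OR-ing column-wise yields "0b111001", i.e. decimal 57 (and 25 | 32 == 57).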
if __name__ == "__main__":
import doctest
doctest.testmod()
| 164
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class SCREAMING_SNAKE_CASE__ (__snake_case ):
__lowerCamelCase : List[str] = """naver-clova-ix/donut-base-finetuned-docvqa"""
__lowerCamelCase : List[Any] = (
"""This is a tool that answers a question about an document (pdf). It takes an input named `document` which """
"""should be the document containing the information, as well as a `question` that is the question about the """
"""document. It returns a text that contains the answer to the question."""
)
__lowerCamelCase : str = """document_qa"""
__lowerCamelCase : Union[str, Any] = AutoProcessor
__lowerCamelCase : Optional[int] = VisionEncoderDecoderModel
__lowerCamelCase : Optional[int] = ["""image""", """text"""]
__lowerCamelCase : Any = ["""text"""]
def __init__( self , *a , **a):
if not is_vision_available():
raise ValueError('Pillow must be installed to use the DocumentQuestionAnsweringTool.')
super().__init__(*a , **a)
def snake_case_ ( self , a , a):
lowercase__ : List[Any] = '<s_docvqa><s_question>{user_input}</s_question><s_answer>'
lowercase__ : int = task_prompt.replace('{user_input}' , a)
lowercase__ : Dict = self.pre_processor.tokenizer(
a , add_special_tokens=a , return_tensors='pt').input_ids
lowercase__ : Union[str, Any] = self.pre_processor(a , return_tensors='pt').pixel_values
return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}
def snake_case_ ( self , a):
return self.model.generate(
inputs['pixel_values'].to(self.device) , decoder_input_ids=inputs['decoder_input_ids'].to(self.device) , max_length=self.model.decoder.config.max_position_embeddings , early_stopping=a , pad_token_id=self.pre_processor.tokenizer.pad_token_id , eos_token_id=self.pre_processor.tokenizer.eos_token_id , use_cache=a , num_beams=1 , bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]] , return_dict_in_generate=a , ).sequences
def snake_case_ ( self , a):
lowercase__ : Optional[Any] = self.pre_processor.batch_decode(a)[0]
lowercase__ : Tuple = sequence.replace(self.pre_processor.tokenizer.eos_token , '')
lowercase__ : Tuple = sequence.replace(self.pre_processor.tokenizer.pad_token , '')
lowercase__ : str = re.sub(r'<.*?>' , '' , a , count=1).strip() # remove first task start token
lowercase__ : Optional[Any] = self.pre_processor.token2json(a)
return sequence["answer"]
| 164
| 1
|
import argparse
import collections
import numpy as np
import torch
from flax import traverse_util
from tax import checkpoints
from transformers import MTaConfig, UMTaEncoderModel, UMTaForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def __lowerCamelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
return params[F"""{prefix}/{prefix}/relpos_bias/rel_embedding"""][:, i, :]
def __lowerCamelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__="attention" ):
"""simple docstring"""
lowercase__ : Dict = np.ascontiguousarray(params[F"""{prefix}/{prefix}/{layer_name}/key/kernel"""][:, i, :, :] )
lowercase__ : List[Any] = k_tmp.reshape(k_tmp.shape[0] , k_tmp.shape[1] * k_tmp.shape[2] )
lowercase__ : Any = np.ascontiguousarray(params[F"""{prefix}/{prefix}/{layer_name}/out/kernel"""][:, i, :, :] )
lowercase__ : Dict = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1] , o_tmp.shape[2] )
lowercase__ : Optional[int] = np.ascontiguousarray(params[F"""{prefix}/{prefix}/{layer_name}/query/kernel"""][:, i, :, :] )
lowercase__ : str = q_tmp.reshape(q_tmp.shape[0] , q_tmp.shape[1] * q_tmp.shape[2] )
lowercase__ : Dict = np.ascontiguousarray(params[F"""{prefix}/{prefix}/{layer_name}/value/kernel"""][:, i, :, :] )
lowercase__ : Optional[int] = v_tmp.reshape(v_tmp.shape[0] , v_tmp.shape[1] * v_tmp.shape[2] )
return k, o, q, v
def __lowerCamelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=False ):
"""simple docstring"""
if split_mlp_wi:
lowercase__ : List[Any] = params[F"""{prefix}/{prefix}/mlp/wi_0/kernel"""][:, i, :]
lowercase__ : Union[str, Any] = params[F"""{prefix}/{prefix}/mlp/wi_1/kernel"""][:, i, :]
lowercase__ : int = (wi_a, wi_a)
else:
lowercase__ : List[str] = params[F"""{prefix}/{prefix}/mlp/wi/kernel"""][:, i, :]
lowercase__ : Tuple = params[F"""{prefix}/{prefix}/mlp/wo/kernel"""][:, i, :]
return wi, wo
def __lowerCamelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
return params[F"""{prefix}/{prefix}/{layer_name}/scale"""][:, i]
def __lowerCamelCase ( lowerCamelCase__ , *, lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = False ):
"""simple docstring"""
lowercase__ : Any = traverse_util.flatten_dict(variables["target"] )
lowercase__ : Optional[int] = {"/".join(lowerCamelCase__ ): v for k, v in old.items()}
# v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
lowercase__ : Tuple = "encoder/encoder/mlp/wi_0/kernel" in old
print("Split MLP:" , lowerCamelCase__ )
lowercase__ : List[Any] = collections.OrderedDict()
# Shared embeddings.
lowercase__ : Optional[Any] = old["token_embedder/embedding"]
# Encoder.
for i in range(lowerCamelCase__ ):
# Block i, layer 0 (Self Attention).
lowercase__ : Dict = tax_layer_norm_lookup(lowerCamelCase__ , lowerCamelCase__ , "encoder" , "pre_attention_layer_norm" )
lowercase__ : Dict = tax_attention_lookup(lowerCamelCase__ , lowerCamelCase__ , "encoder" , "attention" )
lowercase__ : List[Any] = layer_norm
lowercase__ : Optional[int] = k.T
lowercase__ : Union[str, Any] = o.T
lowercase__ : Any = q.T
lowercase__ : Dict = v.T
# Block i, layer 1 (MLP).
lowercase__ : Optional[int] = tax_layer_norm_lookup(lowerCamelCase__ , lowerCamelCase__ , "encoder" , "pre_mlp_layer_norm" )
lowercase__ : Optional[int] = tax_mlp_lookup(lowerCamelCase__ , lowerCamelCase__ , "encoder" , lowerCamelCase__ )
lowercase__ : Any = layer_norm
if split_mlp_wi:
lowercase__ : Tuple = wi[0].T
lowercase__ : Optional[int] = wi[1].T
else:
lowercase__ : Optional[int] = wi.T
lowercase__ : int = wo.T
if scalable_attention:
# convert the rel_embedding of each layer
lowercase__ : Dict = tax_relpos_bias_lookup(
lowerCamelCase__ , lowerCamelCase__ , "encoder" ).T
lowercase__ : Tuple = old["encoder/encoder_norm/scale"]
if not scalable_attention:
lowercase__ : Tuple = tax_relpos_bias_lookup(
lowerCamelCase__ , 0 , "encoder" ).T
lowercase__ : Optional[Any] = tax_relpos_bias_lookup(
lowerCamelCase__ , 0 , "decoder" ).T
if not is_encoder_only:
# Decoder.
for i in range(lowerCamelCase__ ):
# Block i, layer 0 (Self Attention).
lowercase__ : int = tax_layer_norm_lookup(lowerCamelCase__ , lowerCamelCase__ , "decoder" , "pre_self_attention_layer_norm" )
lowercase__ : Optional[Any] = tax_attention_lookup(lowerCamelCase__ , lowerCamelCase__ , "decoder" , "self_attention" )
lowercase__ : Dict = layer_norm
lowercase__ : List[str] = k.T
lowercase__ : Dict = o.T
lowercase__ : Optional[int] = q.T
lowercase__ : Dict = v.T
# Block i, layer 1 (Cross Attention).
lowercase__ : Union[str, Any] = tax_layer_norm_lookup(lowerCamelCase__ , lowerCamelCase__ , "decoder" , "pre_cross_attention_layer_norm" )
lowercase__ : List[Any] = tax_attention_lookup(lowerCamelCase__ , lowerCamelCase__ , "decoder" , "encoder_decoder_attention" )
lowercase__ : Tuple = layer_norm
lowercase__ : Union[str, Any] = k.T
lowercase__ : Optional[Any] = o.T
lowercase__ : Optional[Any] = q.T
lowercase__ : str = v.T
# Block i, layer 2 (MLP).
lowercase__ : List[Any] = tax_layer_norm_lookup(lowerCamelCase__ , lowerCamelCase__ , "decoder" , "pre_mlp_layer_norm" )
lowercase__ : List[Any] = tax_mlp_lookup(lowerCamelCase__ , lowerCamelCase__ , "decoder" , lowerCamelCase__ )
lowercase__ : str = layer_norm
if split_mlp_wi:
lowercase__ : Any = wi[0].T
lowercase__ : List[Any] = wi[1].T
else:
lowercase__ : Optional[Any] = wi.T
lowercase__ : int = wo.T
if scalable_attention:
# convert the rel_embedding of each layer
lowercase__ : int = tax_relpos_bias_lookup(lowerCamelCase__ , lowerCamelCase__ , "decoder" ).T
lowercase__ : Any = old["decoder/decoder_norm/scale"]
# LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
if "decoder/logits_dense/kernel" in old:
lowercase__ : int = old["decoder/logits_dense/kernel"].T
return new
def __lowerCamelCase ( lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
lowercase__ : Optional[int] = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] )
# Add what is missing.
if "encoder.embed_tokens.weight" not in state_dict:
lowercase__ : Tuple = state_dict["shared.weight"]
if not is_encoder_only:
if "decoder.embed_tokens.weight" not in state_dict:
lowercase__ : List[Any] = state_dict["shared.weight"]
if "lm_head.weight" not in state_dict: # For old 1.0 models.
print("Using shared word embeddings as lm_head." )
lowercase__ : List[str] = state_dict["shared.weight"]
return state_dict
def __lowerCamelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
lowercase__ : Any = checkpoints.load_tax_checkpoint(lowerCamelCase__ )
lowercase__ : int = convert_tax_to_pytorch(
lowerCamelCase__ , num_layers=config.num_layers , is_encoder_only=lowerCamelCase__ , scalable_attention=lowerCamelCase__ )
lowercase__ : Tuple = make_state_dict(lowerCamelCase__ , lowerCamelCase__ )
model.load_state_dict(lowerCamelCase__ , strict=lowerCamelCase__ )
def __lowerCamelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = False , lowerCamelCase__ = False , ):
"""simple docstring"""
lowercase__ : Dict = MTaConfig.from_json_file(lowerCamelCase__ )
print(F"""Building PyTorch model from configuration: {config}""" )
# Non-v1.1 checkpoints could also use T5Model, but this works for all.
# The v1.0 checkpoints will simply have an LM head that is the word embeddings.
if is_encoder_only:
lowercase__ : Optional[int] = UMTaEncoderModel(lowerCamelCase__ )
else:
lowercase__ : Optional[int] = UMTaForConditionalGeneration(lowerCamelCase__ )
# Load weights from tf checkpoint
load_tax_weights_in_ta(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
# Save pytorch-model
print(F"""Save PyTorch model to {pytorch_dump_path}""" )
model.save_pretrained(lowerCamelCase__ )
# Verify that we can load the checkpoint.
model.from_pretrained(lowerCamelCase__ )
print("Done" )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser(description='''Converts a native T5X checkpoint into a PyTorch checkpoint.''')
# Required parameters
parser.add_argument(
'''--t5x_checkpoint_path''', default=None, type=str, required=True, help='''Path to the T5X checkpoint.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--is_encoder_only''', action='''store_true''', help='''Check if the model is encoder-decoder model''', default=False
)
parser.add_argument(
'''--scalable_attention''',
action='''store_true''',
help='''Whether the model uses scaled attention (umt5 model)''',
default=False,
)
lowerCAmelCase__ = parser.parse_args()
convert_tax_checkpoint_to_pytorch(
args.tax_checkpoint_path,
args.config_file,
args.pytorch_dump_path,
args.is_encoder_only,
args.scalable_attention,
)
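# Minimal invocation sketch (script name and paths are placeholders), using only
# the flags defined by the argparse block above:
#   python convert_t5x_checkpoint_to_pytorch.py \
#       --t5x_checkpoint_path /path/to/checkpoint \
#       --config_file /path/to/config.json \
#       --pytorch_dump_path /path/to/pytorch_model \
#       --scalable_attention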
| 703
|
import inspect
import unittest
from transformers import ConvNextVaConfig
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel
from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class snake_case__:
"""simple docstring"""
def __init__( self : Any , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Tuple=13 , SCREAMING_SNAKE_CASE : List[str]=32 , SCREAMING_SNAKE_CASE : int=3 , SCREAMING_SNAKE_CASE : Any=4 , SCREAMING_SNAKE_CASE : Optional[Any]=[10, 20, 30, 40] , SCREAMING_SNAKE_CASE : int=[2, 2, 3, 2] , SCREAMING_SNAKE_CASE : Dict=True , SCREAMING_SNAKE_CASE : Dict=True , SCREAMING_SNAKE_CASE : str=37 , SCREAMING_SNAKE_CASE : Tuple="gelu" , SCREAMING_SNAKE_CASE : Optional[int]=10 , SCREAMING_SNAKE_CASE : Optional[int]=0.02 , SCREAMING_SNAKE_CASE : Union[str, Any]=["stage2", "stage3", "stage4"] , SCREAMING_SNAKE_CASE : Optional[int]=[2, 3, 4] , SCREAMING_SNAKE_CASE : str=None , ):
lowercase__ : Union[str, Any] = parent
lowercase__ : Optional[int] = batch_size
lowercase__ : Optional[Any] = image_size
lowercase__ : Tuple = num_channels
lowercase__ : Tuple = num_stages
lowercase__ : List[Any] = hidden_sizes
lowercase__ : Any = depths
lowercase__ : List[str] = is_training
lowercase__ : int = use_labels
lowercase__ : Union[str, Any] = intermediate_size
lowercase__ : List[Any] = hidden_act
lowercase__ : Tuple = num_labels
lowercase__ : Optional[Any] = initializer_range
lowercase__ : Optional[Any] = out_features
lowercase__ : Union[str, Any] = out_indices
lowercase__ : Tuple = scope
def snake_case ( self : Dict ):
lowercase__ : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase__ : Dict = None
if self.use_labels:
lowercase__ : Dict = ids_tensor([self.batch_size] , self.num_labels )
lowercase__ : Tuple = self.get_config()
return config, pixel_values, labels
def snake_case ( self : Tuple ):
return ConvNextVaConfig(
num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
def snake_case ( self : Tuple , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Optional[Any] ):
lowercase__ : Dict = ConvNextVaModel(config=SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
lowercase__ : Tuple = model(SCREAMING_SNAKE_CASE )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def snake_case ( self : List[Any] , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Optional[int] ):
lowercase__ : Any = ConvNextVaForImageClassification(SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
lowercase__ : str = model(SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def snake_case ( self : int , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Dict ):
lowercase__ : Any = ConvNextVaBackbone(config=SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
lowercase__ : Tuple = model(SCREAMING_SNAKE_CASE )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
lowercase__ : str = None
lowercase__ : List[Any] = ConvNextVaBackbone(config=SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
lowercase__ : List[Any] = model(SCREAMING_SNAKE_CASE )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def snake_case ( self : Dict ):
lowercase__ : str = self.prepare_config_and_inputs()
lowercase__ , lowercase__ , lowercase__ : Optional[int] = config_and_inputs
lowercase__ : List[Any] = {"pixel_values": pixel_values}
return config, inputs_dict
def snake_case ( self : Optional[Any] ):
lowercase__ : Optional[Any] = self.prepare_config_and_inputs()
lowercase__ , lowercase__ , lowercase__ : Dict = config_and_inputs
lowercase__ : Optional[Any] = {"pixel_values": pixel_values, "labels": labels}
return config, inputs_dict
@require_torch
class snake_case__(_UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
lowercase_ = (
(
ConvNextVaModel,
ConvNextVaForImageClassification,
ConvNextVaBackbone,
)
if is_torch_available()
else ()
)
lowercase_ = (
{"""feature-extraction""": ConvNextVaModel, """image-classification""": ConvNextVaForImageClassification}
if is_torch_available()
else {}
)
lowercase_ = False
lowercase_ = False
lowercase_ = False
lowercase_ = False
lowercase_ = False
def snake_case ( self : List[Any] ):
lowercase__ : List[str] = ConvNextVaModelTester(self )
lowercase__ : Optional[Any] = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE , has_text_modality=SCREAMING_SNAKE_CASE , hidden_size=37 )
def snake_case ( self : Optional[int] ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def snake_case ( self : List[str] ):
return
@unittest.skip(reason="ConvNextV2 does not use inputs_embeds" )
def snake_case ( self : Dict ):
pass
@unittest.skip(reason="ConvNextV2 does not support input and output embeddings" )
def snake_case ( self : Union[str, Any] ):
pass
@unittest.skip(reason="ConvNextV2 does not use feedforward chunking" )
def snake_case ( self : Union[str, Any] ):
pass
def snake_case ( self : Optional[int] ):
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
lowercase__ , lowercase__ : List[Any] = self.model_tester.prepare_config_and_inputs_with_labels()
lowercase__ : List[str] = True
if model_class.__name__ in [
*get_values(SCREAMING_SNAKE_CASE ),
*get_values(SCREAMING_SNAKE_CASE ),
]:
continue
lowercase__ : List[str] = model_class(SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.train()
lowercase__ : Optional[int] = self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , return_labels=SCREAMING_SNAKE_CASE )
lowercase__ : Union[str, Any] = model(**SCREAMING_SNAKE_CASE ).loss
loss.backward()
def snake_case ( self : Optional[Any] ):
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
lowercase__ , lowercase__ : Tuple = self.model_tester.prepare_config_and_inputs_with_labels()
lowercase__ : Optional[Any] = False
lowercase__ : Dict = True
if (
model_class.__name__
in [*get_values(SCREAMING_SNAKE_CASE ), *get_values(SCREAMING_SNAKE_CASE )]
or not model_class.supports_gradient_checkpointing
):
continue
lowercase__ : Union[str, Any] = model_class(SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.gradient_checkpointing_enable()
model.train()
lowercase__ : str = self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , return_labels=SCREAMING_SNAKE_CASE )
lowercase__ : str = model(**SCREAMING_SNAKE_CASE ).loss
loss.backward()
def snake_case ( self : int ):
lowercase__ , lowercase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : Union[str, Any] = model_class(SCREAMING_SNAKE_CASE )
lowercase__ : int = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__ : str = [*signature.parameters.keys()]
lowercase__ : Optional[int] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE )
def snake_case ( self : Dict ):
lowercase__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE )
def snake_case ( self : str ):
def check_hidden_states_output(SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : str ):
lowercase__ : Any = model_class(SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
with torch.no_grad():
lowercase__ : Tuple = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) )
lowercase__ : Optional[Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
lowercase__ : Dict = self.model_tester.num_stages
self.assertEqual(len(SCREAMING_SNAKE_CASE ) , expected_num_stages + 1 )
# ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
lowercase__ , lowercase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : Union[str, Any] = True
check_hidden_states_output(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase__ : Optional[Any] = True
check_hidden_states_output(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def snake_case ( self : Any ):
lowercase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*SCREAMING_SNAKE_CASE )
@slow
def snake_case ( self : List[str] ):
for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ : List[str] = ConvNextVaModel.from_pretrained(SCREAMING_SNAKE_CASE )
self.assertIsNotNone(SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( ):
"""simple docstring"""
lowercase__ : Optional[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class snake_case__(unittest.TestCase ):
"""simple docstring"""
@cached_property
def snake_case ( self : List[Any] ):
return AutoImageProcessor.from_pretrained("facebook/convnextv2-tiny-1k-224" ) if is_vision_available() else None
@slow
def snake_case ( self : Optional[int] ):
lowercase__ : Union[str, Any] = ConvNextVaForImageClassification.from_pretrained("facebook/convnextv2-tiny-1k-224" ).to(SCREAMING_SNAKE_CASE )
lowercase__ : Dict = self.default_image_processor
lowercase__ : int = prepare_img()
lowercase__ : Optional[Any] = preprocessor(images=SCREAMING_SNAKE_CASE , return_tensors="pt" ).to(SCREAMING_SNAKE_CASE )
# forward pass
with torch.no_grad():
lowercase__ : Tuple = model(**SCREAMING_SNAKE_CASE )
# verify the logits
lowercase__ : Optional[int] = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , SCREAMING_SNAKE_CASE )
lowercase__ : Optional[Any] = torch.tensor([0.9_996, 0.1_966, -0.4_386] ).to(SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , SCREAMING_SNAKE_CASE , atol=1E-4 ) )
| 81
| 0
|
"""simple docstring"""
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
__lowerCAmelCase : Tuple = logging.get_logger(__name__)
__lowerCAmelCase : List[Any] = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
__lowerCAmelCase : Optional[int] = {
'''tokenizer_file''': {
'''EleutherAI/gpt-neox-20b''': '''https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json''',
},
}
__lowerCAmelCase : int = {
'''gpt-neox-20b''': 2048,
}
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = VOCAB_FILES_NAMES
_lowerCamelCase = PRETRAINED_VOCAB_FILES_MAP
_lowerCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowerCamelCase = ['''input_ids''', '''attention_mask''']
def __init__( self , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase="<|endoftext|>" , _lowercase="<|endoftext|>" , _lowercase="<|endoftext|>" , _lowercase=False , **_lowercase , ) -> Any:
'''simple docstring'''
super().__init__(
_lowercase , _lowercase , tokenizer_file=_lowercase , unk_token=_lowercase , bos_token=_lowercase , eos_token=_lowercase , add_prefix_space=_lowercase , **_lowercase , )
snake_case_ : Dict = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("""add_prefix_space""" , _lowercase ) != add_prefix_space:
snake_case_ : Any = getattr(_lowercase , pre_tok_state.pop("""type""" ) )
snake_case_ : Tuple = add_prefix_space
snake_case_ : str = pre_tok_class(**_lowercase )
snake_case_ : int = add_prefix_space
def UpperCAmelCase__ ( self , _lowercase , _lowercase = None ) -> Tuple[str]:
'''simple docstring'''
snake_case_ : Any = self._tokenizer.model.save(_lowercase , name=_lowercase )
return tuple(_lowercase )
def UpperCAmelCase__ ( self , _lowercase ) -> List[int]:
'''simple docstring'''
snake_case_ : Optional[Any] = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(_lowercase , add_special_tokens=_lowercase ) + [self.eos_token_id] )
if len(_lowercase ) > self.model_max_length:
snake_case_ : Any = input_ids[-self.model_max_length :]
return input_ids
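# Note: when the concatenated conversation exceeds model_max_length, the slice
# input_ids[-self.model_max_length:] truncates from the left, keeping the most
# recent turns of the dialogue.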
'''simple docstring'''
from collections import Counter
from timeit import timeit
def can_string_be_rearranged_as_palindrome_counter(input_str: str = "") -> bool:
    """A string can be rearranged into a palindrome iff at most one character
    has an odd frequency (spaces and case are ignored)."""
    return sum(c % 2 for c in Counter(input_str.replace(" ", "").lower()).values()) < 2


def can_string_be_rearranged_as_palindrome(input_str: str = "") -> bool:
    if len(input_str) == 0:
        return True
    lower_case_input_str = input_str.replace(" ", "").lower()
    # character_freq_dict: Stores the frequency of every character in the input string
    character_freq_dict: dict = {}

    for character in lower_case_input_str:
        character_freq_dict[character] = character_freq_dict.get(character, 0) + 1

    # Rearrangement is possible iff at most one character count is odd.
    odd_char = 0
    for character_count in character_freq_dict.values():
        if character_count % 2:
            odd_char += 1
    if odd_char > 1:
        return False
    return True


def benchmark(input_str: str = "") -> None:
    """Benchmark both implementations on the same input string."""
    print("\nFor string = ", input_str, ":")
    print(
        "> can_string_be_rearranged_as_palindrome_counter()",
        "\tans =",
        can_string_be_rearranged_as_palindrome_counter(input_str),
        "\ttime =",
        timeit(
            "z.can_string_be_rearranged_as_palindrome_counter(z.check_str)",
            setup="import __main__ as z",
        ),
        "seconds",
    )
    print(
        "> can_string_be_rearranged_as_palindrome()",
        "\tans =",
        can_string_be_rearranged_as_palindrome(input_str),
        "\ttime =",
        timeit(
            "z.can_string_be_rearranged_as_palindrome(z.check_str)",
            setup="import __main__ as z",
        ),
        "seconds",
    )


if __name__ == "__main__":
    check_str = input(
        "Enter string to determine if it can be rearranged as a palindrome or not: "
    ).strip()
    benchmark(check_str)
    status = can_string_be_rearranged_as_palindrome_counter(check_str)
    print(f"{check_str} can {'' if status else 'not '}be rearranged as a palindrome")
def solution(n: int = 100) -> int:
    """Difference between the square of the sum and the sum of the squares of
    the first n natural numbers (Project Euler problem 6)."""
    sum_of_squares = 0
    sum_of_ints = 0
    for i in range(1, n + 1):
        sum_of_squares += i**2
        sum_of_ints += i
    return sum_of_ints**2 - sum_of_squares


if __name__ == "__main__":
    print(f"{solution() = }")
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
def log_results(result: Dataset, args: argparse.Namespace):
    """Compute WER and CER, print them, and write them (and optionally all
    predictions/targets) to text files."""
    log_outputs = args.log_outputs
    dataset_id = "_".join(args.dataset.split("/") + [args.config, args.split])

    # load metric
    wer = load_metric("wer")
    cer = load_metric("cer")

    # compute metrics
    wer_result = wer.compute(references=result["target"], predictions=result["prediction"])
    cer_result = cer.compute(references=result["target"], predictions=result["prediction"])

    # print & log results
    result_str = f"WER: {wer_result}\nCER: {cer_result}"
    print(result_str)

    with open(f"{dataset_id}_eval_results.txt", "w") as f:
        f.write(result_str)

    # log all results in text file. Possibly interesting for analysis
    if log_outputs is not None:
        pred_file = f"log_{dataset_id}_predictions.txt"
        target_file = f"log_{dataset_id}_targets.txt"

        with open(pred_file, "w") as p, open(target_file, "w") as t:
            # mapping function to write output
            def write_to_file(batch, i):
                p.write(f"{i}" + "\n")
                p.write(batch["prediction"] + "\n")
                t.write(f"{i}" + "\n")
                t.write(batch["target"] + "\n")

            result.map(write_to_file, with_indices=True)
def normalize_text(text: str) -> str:
    """Lowercase and strip the characters/sequences that were ignored during training."""
    chars_to_ignore_regex = '[,?.!\-\;\:"“%‘”�—’…–]'  # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training
    text = re.sub(chars_to_ignore_regex, "", text.lower())

    # In addition, we can normalize the target text, e.g. removing new lines characters etc...
    # note that order is important here!
    token_sequences_to_ignore = ["\n\n", "\n", "   ", "  "]

    for t in token_sequences_to_ignore:
        text = " ".join(text.split(t))

    return text
def main(args: argparse.Namespace):
    # load dataset
    dataset = load_dataset(args.dataset, args.config, split=args.split, use_auth_token=True)

    # for testing: only process the first two examples as a test
    # dataset = dataset.select(range(10))

    # load processor
    feature_extractor = AutoFeatureExtractor.from_pretrained(args.model_id)
    sampling_rate = feature_extractor.sampling_rate

    # resample audio
    dataset = dataset.cast_column("audio", Audio(sampling_rate=sampling_rate))

    # load eval pipeline
    if args.device is None:
        args.device = 0 if torch.cuda.is_available() else -1
    asr = pipeline("automatic-speech-recognition", model=args.model_id, device=args.device)

    # map function to decode audio
    def map_to_pred(batch):
        prediction = asr(
            batch["audio"]["array"], chunk_length_s=args.chunk_length_s, stride_length_s=args.stride_length_s
        )

        batch["prediction"] = prediction["text"]
        batch["target"] = normalize_text(batch["sentence"])
        return batch

    # run inference on all examples
    result = dataset.map(map_to_pred, remove_columns=dataset.column_names)

    # compute and log_results
    # do not change function below
    log_results(result, args)
if __name__ == "__main__":
A__ : int = argparse.ArgumentParser()
parser.add_argument(
'--model_id', type=str, required=True, help='Model identifier. Should be loadable with 🤗 Transformers'
)
parser.add_argument(
'--dataset',
type=str,
required=True,
help='Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets',
)
parser.add_argument(
'--config', type=str, required=True, help='Config of the dataset. *E.g.* `\'en\'` for Common Voice'
)
parser.add_argument('--split', type=str, required=True, help='Split of the dataset. *E.g.* `\'test\'`')
parser.add_argument(
'--chunk_length_s', type=float, default=None, help='Chunk length in seconds. Defaults to 5 seconds.'
)
parser.add_argument(
'--stride_length_s', type=float, default=None, help='Stride of the audio chunks. Defaults to 1 second.'
)
parser.add_argument(
'--log_outputs', action='store_true', help='If defined, write outputs to log file for analysis.'
)
parser.add_argument(
'--device',
type=int,
default=None,
help='The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.',
)
A__ : Union[str, Any] = parser.parse_args()
main(args)
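

# --- Usage sketch (added for illustration; not part of the original script). ---
# Example invocation; the model and dataset ids below are placeholders:
#   python eval.py \
#     --model_id my-user/wav2vec2-xls-r-300m-german \
#     --dataset mozilla-foundation/common_voice_8_0 --config de --split test \
#     --chunk_length_s 5.0 --stride_length_s 1.0 --log_outputs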
import io
import json
import fsspec
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_json_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_json_keep_in_memory(keep_in_memory, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = JsonDatasetReader(jsonl_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_json_dataset(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_dataset_from_json_features(features, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader(jsonl_path, features=features, cache_dir=cache_dir).read()
    _check_json_dataset(dataset, expected_features)
@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_3": "float64", "col_1": "string", "col_2": "int64"},
    ],
)
def test_dataset_from_json_with_unsorted_column_names(features, jsonl_312_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_3": "float64", "col_1": "string", "col_2": "int64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader(jsonl_312_path, features=features, cache_dir=cache_dir).read()
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 2
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_3", "col_1", "col_2"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


def test_dataset_from_json_with_mismatched_features_order(jsonl_312_path, tmp_path):
    # the file stores columns in the order col_3, col_1, col_2, while the
    # requested features are ordered col_2, col_3, col_1
    features = {"col_2": "int64", "col_3": "float64", "col_1": "string"}
    expected_features = features.copy()
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    cache_dir = tmp_path / "cache"
    dataset = JsonDatasetReader(jsonl_312_path, features=features, cache_dir=cache_dir).read()
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 2
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_2", "col_3", "col_1"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_dataset_from_json_split(split, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = JsonDatasetReader(jsonl_path, cache_dir=cache_dir, split=split).read()
    _check_json_dataset(dataset, expected_features)
    assert dataset.split == split if split else "train"


@pytest.mark.parametrize("path_type", [str, list])
def test_dataset_from_json_path_type(path_type, jsonl_path, tmp_path):
    if issubclass(path_type, str):
        path = jsonl_path
    elif issubclass(path_type, list):
        path = [jsonl_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = JsonDatasetReader(path, cache_dir=cache_dir).read()
    _check_json_dataset(dataset, expected_features)


def _check_json_datasetdict(dataset_dict, expected_features, splits=("train",)):
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 3
        assert dataset.column_names == ["col_1", "col_2", "col_3"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_datasetdict_from_json_keep_in_memory(keep_in_memory, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = JsonDatasetReader({"train": jsonl_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_json_datasetdict(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_datasetdict_from_json_features(features, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader({"train": jsonl_path}, features=features, cache_dir=cache_dir).read()
    _check_json_datasetdict(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_datasetdict_from_json_splits(split, jsonl_path, tmp_path):
    if split:
        path = {split: jsonl_path}
    else:
        split = "train"
        path = {"train": jsonl_path, "test": jsonl_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = JsonDatasetReader(path, cache_dir=cache_dir).read()
    _check_json_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())
def load_json(buffer):
    return json.load(buffer)


def load_json_lines(buffer):
    return [json.loads(line) for line in buffer]


class TestJsonDatasetWriter:
    @pytest.mark.parametrize("lines, load_json_function", [(True, load_json_lines), (False, load_json)])
    def test_dataset_to_json_lines(self, lines, load_json_function, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=lines).write()
            buffer.seek(0)
            exported_content = load_json_function(buffer)
        assert isinstance(exported_content, list)
        assert isinstance(exported_content[0], dict)
        assert len(exported_content) == 10

    @pytest.mark.parametrize(
        "orient, container, keys, len_at",
        [
            ("records", list, {"tokens", "labels", "answers", "id"}, None),
            ("split", dict, {"columns", "data"}, "data"),
            ("index", dict, set("0123456789"), None),
            ("columns", dict, {"tokens", "labels", "answers", "id"}, "tokens"),
            ("values", list, None, None),
            ("table", dict, {"schema", "data"}, "data"),
        ],
    )
    def test_dataset_to_json_orient(self, orient, container, keys, len_at, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=False, orient=orient).write()
            buffer.seek(0)
            exported_content = load_json(buffer)
        assert isinstance(exported_content, container)
        if keys:
            if container is dict:
                assert exported_content.keys() == keys
            else:
                assert exported_content[0].keys() == keys
        else:
            assert not hasattr(exported_content, "keys") and not hasattr(exported_content[0], "keys")
        if len_at:
            assert len(exported_content[len_at]) == 10
        else:
            assert len(exported_content) == 10

    @pytest.mark.parametrize("lines, load_json_function", [(True, load_json_lines), (False, load_json)])
    def test_dataset_to_json_lines_multiproc(self, lines, load_json_function, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=lines, num_proc=2).write()
            buffer.seek(0)
            exported_content = load_json_function(buffer)
        assert isinstance(exported_content, list)
        assert isinstance(exported_content[0], dict)
        assert len(exported_content) == 10

    @pytest.mark.parametrize(
        "orient, container, keys, len_at",
        [
            ("records", list, {"tokens", "labels", "answers", "id"}, None),
            ("split", dict, {"columns", "data"}, "data"),
            ("index", dict, set("0123456789"), None),
            ("columns", dict, {"tokens", "labels", "answers", "id"}, "tokens"),
            ("values", list, None, None),
            ("table", dict, {"schema", "data"}, "data"),
        ],
    )
    def test_dataset_to_json_orient_multiproc(self, orient, container, keys, len_at, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=False, orient=orient, num_proc=2).write()
            buffer.seek(0)
            exported_content = load_json(buffer)
        assert isinstance(exported_content, container)
        if keys:
            if container is dict:
                assert exported_content.keys() == keys
            else:
                assert exported_content[0].keys() == keys
        else:
            assert not hasattr(exported_content, "keys") and not hasattr(exported_content[0], "keys")
        if len_at:
            assert len(exported_content[len_at]) == 10
        else:
            assert len(exported_content) == 10

    def test_dataset_to_json_orient_invalidproc(self, dataset):
        with pytest.raises(ValueError):
            with io.BytesIO() as buffer:
                JsonDatasetWriter(dataset, buffer, num_proc=0)

    @pytest.mark.parametrize("compression, extension", [("gzip", "gz"), ("bz2", "bz2"), ("xz", "xz")])
    def test_dataset_to_json_compression(self, shared_datadir, tmp_path_factory, extension, compression, dataset):
        path = tmp_path_factory.mktemp("data") / f"test.json.{extension}"
        original_path = str(shared_datadir / f"test_file.json.{extension}")
        JsonDatasetWriter(dataset, path, compression=compression).write()

        with fsspec.open(path, "rb", compression="infer") as f:
            exported_content = f.read()
        with fsspec.open(original_path, "rb", compression="infer") as f:
            original_content = f.read()
        assert exported_content == original_content
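

# --- Usage sketch (added for illustration; not part of the original tests). ---
# Round-trip a small Dataset through the JSON-lines writer/reader.
if __name__ == "__main__":
    import tempfile

    ds = Dataset.from_dict({"col_1": ["a", "b"], "col_2": [1, 2]})
    with tempfile.TemporaryDirectory() as tmp:
        JsonDatasetWriter(ds, f"{tmp}/data.jsonl", lines=True).write()
        reloaded = JsonDatasetReader(f"{tmp}/data.jsonl", cache_dir=f"{tmp}/cache").read()
        assert reloaded.column_names == ["col_1", "col_2"]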
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/data2vec-vision-base-ft": (
        "https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json"
    ),
}


class Data2VecVisionConfig(PretrainedConfig):
    model_type = "data2vec-vision"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        use_mask_token=False,
        use_absolute_position_embeddings=False,
        use_relative_position_bias=False,
        use_shared_relative_position_bias=False,
        layer_scale_init_value=0.1,
        drop_path_rate=0.1,
        use_mean_pooling=True,
        out_indices=[3, 5, 7, 11],
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class Data2VecVisionOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
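

# --- Usage sketch (added for illustration; not part of the original file). ---
if __name__ == "__main__":
    config = Data2VecVisionConfig()
    print(config.model_type, config.hidden_size, config.num_hidden_layers)  # data2vec-vision 768 12
    onnx_config = Data2VecVisionOnnxConfig(config)
    print(dict(onnx_config.inputs))  # {'pixel_values': {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}}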
import time
import warnings
from abc import ABC
from copy import deepcopy
from typing import Optional
import torch
from ..utils import add_start_docstrings, logging
logger = logging.get_logger(__name__)

STOPPING_CRITERIA_INPUTS_DOCSTRING = r"""
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
scores (`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`):
Prediction scores of a language modeling head. These can be scores for each vocabulary token before SoftMax
or scores for each vocabulary token after SoftMax.
kwargs (`Dict[str, Any]`, *optional*):
Additional stopping criteria specific kwargs.
Return:
`bool`. `False` indicates we should continue, `True` indicates we should stop.
"""
class StoppingCriteria(ABC):
    """Abstract base class for all stopping criteria applied during generation."""

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        raise NotImplementedError("StoppingCriteria needs to be subclassed")


class MaxLengthCriteria(StoppingCriteria):
    """Stop generation once `input_ids` reaches `max_length` tokens."""

    def __init__(self, max_length: int, max_position_embeddings: Optional[int] = None):
        self.max_length = max_length
        self.max_position_embeddings = max_position_embeddings

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        cur_len = input_ids.shape[-1]
        is_done = cur_len >= self.max_length
        if self.max_position_embeddings is not None and not is_done and cur_len >= self.max_position_embeddings:
            logger.warning_once(
                "This is a friendly reminder - the current text generation call will exceed the model's predefined "
                f"maximum length ({self.max_position_embeddings}). Depending on the model, you may observe "
                "exceptions, performance degradation, or nothing at all."
            )
        return is_done


class MaxNewTokensCriteria(StoppingCriteria):
    """Deprecated: stop once `start_length + max_new_tokens` tokens were generated."""

    def __init__(self, start_length: int, max_new_tokens: int):
        warnings.warn(
            "The class `MaxNewTokensCriteria` is deprecated. "
            f"Please use `MaxLengthCriteria(max_length={start_length + max_new_tokens})` "
            "with `max_length = start_length + max_new_tokens` instead.",
            FutureWarning,
        )
        self.start_length = start_length
        self.max_new_tokens = max_new_tokens
        self.max_length = start_length + max_new_tokens

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        return input_ids.shape[-1] >= self.max_length


class MaxTimeCriteria(StoppingCriteria):
    """Stop generation once more than `max_time` seconds have elapsed."""

    def __init__(self, max_time: float, initial_timestamp: Optional[float] = None):
        self.max_time = max_time
        self.initial_timestamp = time.time() if initial_timestamp is None else initial_timestamp

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        return time.time() - self.initial_timestamp > self.max_time


class StoppingCriteriaList(list):
    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        return any(criteria(input_ids, scores) for criteria in self)

    @property
    def max_length(self) -> Optional[int]:
        for stopping_criterium in self:
            if isinstance(stopping_criterium, MaxLengthCriteria):
                return stopping_criterium.max_length
            elif isinstance(stopping_criterium, MaxNewTokensCriteria):
                return stopping_criterium.max_length
        return None


def validate_stopping_criteria(stopping_criteria: StoppingCriteriaList, max_length: Optional[int]) -> StoppingCriteriaList:
    stopping_max_length = stopping_criteria.max_length
    new_stopping_criteria = deepcopy(stopping_criteria)
    if stopping_max_length is not None and stopping_max_length != max_length:
        warnings.warn("You set different `max_length` for stopping criteria and `max_length` parameter", UserWarning)
    elif stopping_max_length is None:
        new_stopping_criteria.append(MaxLengthCriteria(max_length=max_length))
    return new_stopping_criteria
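

# --- Usage sketch (added for illustration; not part of the original file). ---
# Combine two criteria; generation should stop as soon as either one fires.
if __name__ == "__main__":
    criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=20), MaxTimeCriteria(max_time=5.0)])
    fake_input_ids = torch.zeros((1, 20), dtype=torch.long)
    fake_scores = torch.zeros((1, 100))
    print(criteria(fake_input_ids, fake_scores))  # True: the 20-token budget is already used up
    print(criteria.max_length)                    # 20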
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Sequence, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class QuestionAnsweringExtractive(TaskTemplate):
    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default="question-answering-extractive", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"question": Value("string"), "context": Value("string")})
    label_schema: ClassVar[Features] = Features(
        {
            "answers": Sequence(
                {
                    "text": Value("string"),
                    "answer_start": Value("int32"),
                }
            )
        }
    )
    question_column: str = "question"
    context_column: str = "context"
    answers_column: str = "answers"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.question_column: "question", self.context_column: "context", self.answers_column: "answers"}
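

# --- Usage sketch (added for illustration; not part of the original file). ---
if __name__ == "__main__":
    template = QuestionAnsweringExtractive(question_column="query", context_column="passage")
    print(template.column_mapping)  # {'query': 'question', 'passage': 'context', 'answers': 'answers'}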
"""simple docstring"""
from __future__ import annotations
def extended_euclid(a: int, b: int) -> tuple[int, int]:
    """Return (x, y) such that a*x + b*y = gcd(a, b)."""
    if b == 0:
        return (1, 0)
    (x, y) = extended_euclid(b, a % b)
    k = a // b
    return (y, x - k * y)


def chinese_remainder_theorem(n1: int, r1: int, n2: int, r2: int) -> int:
    """Smallest non-negative n with n ≡ r1 (mod n1) and n ≡ r2 (mod n2),
    assuming gcd(n1, n2) == 1.

    >>> chinese_remainder_theorem(5, 1, 7, 3)
    31
    """
    (x, y) = extended_euclid(n1, n2)
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m


def invert_modulo(a: int, n: int) -> int:
    """Return b such that (a * b) % n == 1."""
    (b, x) = extended_euclid(a, n)
    if b < 0:
        b = (b % n + n) % n
    return b


def chinese_remainder_theorem2(n1: int, r1: int, n2: int, r2: int) -> int:
    """Same as chinese_remainder_theorem, implemented via modular inverses."""
    x, y = invert_modulo(n1, n2), invert_modulo(n2, n1)
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m
if __name__ == "__main__":
from doctest import testmod
testmod(name="""chinese_remainder_theorem""", verbose=True)
testmod(name="""chinese_remainder_theorem2""", verbose=True)
testmod(name="""invert_modulo""", verbose=True)
testmod(name="""extended_euclid""", verbose=True)
'''simple docstring'''
from __future__ import annotations
import math
import random
from collections.abc import Collection
from typing import overload
class Vector:
    """A pure-Python vector with the usual operations."""

    def __init__(self, components: Collection[float] | None = None) -> None:
        if components is None:
            components = []
        self.__components = list(components)

    def __len__(self) -> int:
        return len(self.__components)

    def __str__(self) -> str:
        return "(" + ",".join(map(str, self.__components)) + ")"

    def __add__(self, other: Vector) -> Vector:
        size = len(self)
        if size == len(other):
            result = [self.__components[i] + other.component(i) for i in range(size)]
            return Vector(result)
        else:
            raise Exception("must have the same size")

    def __sub__(self, other: Vector) -> Vector:
        size = len(self)
        if size == len(other):
            result = [self.__components[i] - other.component(i) for i in range(size)]
            return Vector(result)
        else:  # error case
            raise Exception("must have the same size")

    @overload
    def __mul__(self, other: float) -> Vector:
        ...

    @overload
    def __mul__(self, other: Vector) -> float:
        ...

    def __mul__(self, other: float | Vector) -> float | Vector:
        if isinstance(other, (float, int)):
            ans = [c * other for c in self.__components]
            return Vector(ans)
        elif isinstance(other, Vector) and len(self) == len(other):
            size = len(self)
            prods = [self.__components[i] * other.component(i) for i in range(size)]
            return sum(prods)
        else:  # error case
            raise Exception("invalid operand!")

    def copy(self) -> Vector:
        return Vector(self.__components)

    def component(self, i: int) -> float:
        if isinstance(i, int) and -len(self.__components) <= i < len(self.__components):
            return self.__components[i]
        else:
            raise Exception("index out of range")

    def change_component(self, pos: int, value: float) -> None:
        assert -len(self.__components) <= pos < len(self.__components)
        self.__components[pos] = value

    def euclidean_length(self) -> float:
        if len(self.__components) == 0:
            raise Exception("Vector is empty")
        squares = [c**2 for c in self.__components]
        return math.sqrt(sum(squares))

    def angle(self, other: Vector, deg: bool = False) -> float:
        num = self * other
        den = self.euclidean_length() * other.euclidean_length()
        if deg:
            return math.degrees(math.acos(num / den))
        else:
            return math.acos(num / den)


def zero_vector(dimension: int) -> Vector:
    """Return the zero vector of the given dimension."""
    assert isinstance(dimension, int)
    return Vector([0] * dimension)


def unit_basis_vector(dimension: int, pos: int) -> Vector:
    """Return the unit basis vector with a 1 at index pos."""
    assert isinstance(dimension, int) and isinstance(pos, int)
    ans = [0] * dimension
    ans[pos] = 1
    return Vector(ans)


def axpy(scalar: float, x: Vector, y: Vector) -> Vector:
    """Compute scalar * x + y."""
    assert isinstance(x, Vector) and isinstance(y, Vector) and isinstance(scalar, (int, float))
    return x * scalar + y


def random_vector(n: int, a: int, b: int) -> Vector:
    """Random vector of size n with integer components in [a, b]."""
    random.seed(None)
    ans = [random.randint(a, b) for _ in range(n)]
    return Vector(ans)
class Matrix:
    """A simple matrix implementation with elementary operations."""

    def __init__(self, matrix: list[list[float]], w: int, h: int) -> None:
        self.__matrix = matrix
        self.__width = w
        self.__height = h

    def __str__(self) -> str:
        ans = ""
        for i in range(self.__height):
            ans += "|"
            for j in range(self.__width):
                if j < self.__width - 1:
                    ans += str(self.__matrix[i][j]) + ","
                else:
                    ans += str(self.__matrix[i][j]) + "|\n"
        return ans

    def __add__(self, other: Matrix) -> Matrix:
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height):
                row = [
                    self.__matrix[i][j] + other.component(i, j)
                    for j in range(self.__width)
                ]
                matrix.append(row)
            return Matrix(matrix, self.__width, self.__height)
        else:
            raise Exception("matrix must have the same dimension!")

    def __sub__(self, other: Matrix) -> Matrix:
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height):
                row = [
                    self.__matrix[i][j] - other.component(i, j)
                    for j in range(self.__width)
                ]
                matrix.append(row)
            return Matrix(matrix, self.__width, self.__height)
        else:
            raise Exception("matrices must have the same dimension!")

    @overload
    def __mul__(self, other: float) -> Matrix:
        ...

    @overload
    def __mul__(self, other: Vector) -> Vector:
        ...

    def __mul__(self, other: float | Vector) -> Vector | Matrix:
        if isinstance(other, Vector):  # matrix-vector
            if len(other) == self.__width:
                ans = zero_vector(self.__height)
                for i in range(self.__height):
                    prods = [
                        self.__matrix[i][j] * other.component(j)
                        for j in range(self.__width)
                    ]
                    ans.change_component(i, sum(prods))
                return ans
            else:
                raise Exception(
                    "vector must have the same size as the "
                    "number of columns of the matrix!"
                )
        elif isinstance(other, (int, float)):  # matrix-scalar
            matrix = [
                [self.__matrix[i][j] * other for j in range(self.__width)]
                for i in range(self.__height)
            ]
            return Matrix(matrix, self.__width, self.__height)
        return None

    def height(self) -> int:
        return self.__height

    def width(self) -> int:
        return self.__width

    def component(self, x: int, y: int) -> float:
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return self.__matrix[x][y]
        else:
            raise Exception("change_component: indices out of bounds")

    def change_component(self, x: int, y: int, value: float) -> None:
        if 0 <= x < self.__height and 0 <= y < self.__width:
            self.__matrix[x][y] = value
        else:
            raise Exception("change_component: indices out of bounds")

    def minor(self, x: int, y: int) -> float:
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        minor = self.__matrix[:x] + self.__matrix[x + 1 :]
        for i in range(len(minor)):
            minor[i] = minor[i][:y] + minor[i][y + 1 :]
        return Matrix(minor, self.__width - 1, self.__height - 1).determinant()

    def cofactor(self, x: int, y: int) -> float:
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return (-1) ** (x + y) * self.minor(x, y)
        else:
            raise Exception("Indices out of bounds")

    def determinant(self) -> float:
        """Laplace expansion along the first row."""
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        if self.__height < 1:
            raise Exception("Matrix has no element")
        elif self.__height == 1:
            return self.__matrix[0][0]
        elif self.__height == 2:
            return (
                self.__matrix[0][0] * self.__matrix[1][1]
                - self.__matrix[0][1] * self.__matrix[1][0]
            )
        else:
            cofactor_prods = [
                self.__matrix[0][y] * self.cofactor(0, y) for y in range(self.__width)
            ]
            return sum(cofactor_prods)


def square_zero_matrix(n: int) -> Matrix:
    """Return the n x n zero matrix."""
    ans: list[list[float]] = [[0] * n for _ in range(n)]
    return Matrix(ans, n, n)


def random_matrix(width: int, height: int, a: int, b: int) -> Matrix:
    """Random matrix with integer entries in [a, b]."""
    random.seed(None)
    matrix: list[list[float]] = [
        [random.randint(a, b) for _ in range(width)] for _ in range(height)
    ]
    return Matrix(matrix, width, height)
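

# --- Usage sketch (added for illustration; not part of the original file). ---
if __name__ == "__main__":
    v = Vector([1, 2, 3])
    w = Vector([4, 5, 6])
    print(v * w)                        # dot product: 32
    print(v + w)                        # (5,7,9)
    m = Matrix([[1, 2], [3, 4]], 2, 2)
    print(m.determinant())              # 1*4 - 2*3 = -2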
import argparse
import re
from pathlib import Path
import requests
import torch
from PIL import Image
from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor
from transformers import (
EfficientFormerConfig,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerImageProcessor,
)
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def rename_key(old_name, num_meta4D_last_stage):
    new_name = old_name

    if "patch_embed" in old_name:
        _, layer, param = old_name.split(".")

        if layer == "0":
            new_name = old_name.replace("0", "convolution1")
        elif layer == "1":
            new_name = old_name.replace("1", "batchnorm_before")
        elif layer == "3":
            new_name = old_name.replace("3", "convolution2")
        else:
            new_name = old_name.replace("4", "batchnorm_after")

    if "network" in old_name and re.search(r"\d\.\d", old_name):
        two_digit_num = r"\b\d{2}\b"
        if bool(re.search(two_digit_num, old_name)):
            match = re.search(r"\d\.\d\d.", old_name).group()
        else:
            match = re.search(r"\d\.\d.", old_name).group()
        if int(match[0]) < 6:
            trimmed_name = old_name.replace(match, "")
            trimmed_name = trimmed_name.replace("network", match[0] + ".meta4D_layers.blocks." + match[2:-1])
            new_name = "intermediate_stages." + trimmed_name
        else:
            trimmed_name = old_name.replace(match, "")
            if int(match[2]) < num_meta4D_last_stage:
                trimmed_name = trimmed_name.replace("network", "meta4D_layers.blocks." + match[2])
            else:
                layer_index = str(int(match[2]) - num_meta4D_last_stage)
                trimmed_name = trimmed_name.replace("network", "meta3D_layers.blocks." + layer_index)
            if "norm1" in old_name:
                trimmed_name = trimmed_name.replace("norm1", "layernorm1")
            elif "norm2" in old_name:
                trimmed_name = trimmed_name.replace("norm2", "layernorm2")
            elif "fc1" in old_name:
                trimmed_name = trimmed_name.replace("fc1", "linear_in")
            elif "fc2" in old_name:
                trimmed_name = trimmed_name.replace("fc2", "linear_out")

            new_name = "last_stage." + trimmed_name

    elif "network" in old_name and re.search(r".\d.", old_name):
        new_name = old_name.replace("network", "intermediate_stages")

    if "fc" in new_name:
        new_name = new_name.replace("fc", "convolution")
    elif ("norm1" in new_name) and ("layernorm1" not in new_name):
        new_name = new_name.replace("norm1", "batchnorm_before")
    elif ("norm2" in new_name) and ("layernorm2" not in new_name):
        new_name = new_name.replace("norm2", "batchnorm_after")
    if "proj" in new_name:
        new_name = new_name.replace("proj", "projection")
    if "dist_head" in new_name:
        new_name = new_name.replace("dist_head", "distillation_classifier")
    elif "head" in new_name:
        new_name = new_name.replace("head", "classifier")
    elif "patch_embed" in new_name:
        new_name = "efficientformer." + new_name
    elif new_name == "norm.weight" or new_name == "norm.bias":
        new_name = new_name.replace("norm", "layernorm")
        new_name = "efficientformer." + new_name
    else:
        new_name = "efficientformer.encoder." + new_name

    return new_name
def convert_torch_checkpoint(checkpoint, num_meta4D_last_stage):
    for key in checkpoint.copy().keys():
        val = checkpoint.pop(key)
        checkpoint[rename_key(key, num_meta4D_last_stage)] = val
    return checkpoint


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image
def convert_efficientformer_checkpoint(
    checkpoint_path: Path, efficientformer_config_file: Path, pytorch_dump_path: Path, push_to_hub: bool
):
    orig_state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    config = EfficientFormerConfig.from_json_file(efficientformer_config_file)
    model = EfficientFormerForImageClassificationWithTeacher(config)
    model_name = "_".join(checkpoint_path.split("/")[-1].split(".")[0].split("_")[:-1])

    num_meta4D_last_stage = config.depths[-1] - config.num_meta3d_blocks + 1
    new_state_dict = convert_torch_checkpoint(orig_state_dict, num_meta4D_last_stage)

    model.load_state_dict(new_state_dict)
    model.eval()

    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }

    # prepare image
    image = prepare_img()
    image_size = 256
    crop_size = 224
    processor = EfficientFormerImageProcessor(
        size={"shortest_edge": image_size},
        crop_size={"height": crop_size, "width": crop_size},
        resample=pillow_resamplings["bicubic"],
    )
    pixel_values = processor(images=image, return_tensors="pt").pixel_values

    # original processing pipeline
    image_transforms = Compose(
        [
            Resize(image_size, interpolation=pillow_resamplings["bicubic"]),
            CenterCrop(crop_size),
            ToTensor(),
            Normalize(IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD),
        ]
    )
    original_pixel_values = image_transforms(image).unsqueeze(0)

    assert torch.allclose(original_pixel_values, pixel_values)

    outputs = model(pixel_values)
    logits = outputs.logits

    expected_shape = (1, 1000)

    if "l1" in model_name:
        expected_logits = torch.Tensor(
            [-0.1312, 0.4353, -1.0499, -0.5124, 0.4183, -0.6793, -1.3777, -0.0893, -0.7358, -2.4328]
        )
        assert torch.allclose(logits[0, :10], expected_logits, atol=1e-3)
        assert logits.shape == expected_shape
    elif "l3" in model_name:
        expected_logits = torch.Tensor(
            [-1.3150, -1.5456, -1.2556, -0.8496, -0.7127, -0.7897, -0.9728, -0.3052, 0.3751, -0.3127]
        )
        assert torch.allclose(logits[0, :10], expected_logits, atol=1e-3)
        assert logits.shape == expected_shape
    elif "l7" in model_name:
        expected_logits = torch.Tensor(
            [-1.0283, -1.4131, -0.5644, -1.3115, -0.5785, -1.2049, -0.7528, 0.1992, -0.3822, -0.0878]
        )
        assert logits.shape == expected_shape
    else:
        raise ValueError(
            f"Unknown model checkpoint: {checkpoint_path}. Supported versions of EfficientFormer are l1, l3 and l7"
        )

    # Save Checkpoints
    Path(pytorch_dump_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_path)
    print(f"Checkpoint successfully converted. Model saved at {pytorch_dump_path}")
    processor.save_pretrained(pytorch_dump_path)
    print(f"Processor successfully saved at {pytorch_dump_path}")

    if push_to_hub:
        print("Pushing model to the hub...")
        model.push_to_hub(
            repo_id=f"Bearnardd/{pytorch_dump_path}",
            commit_message="Add model",
            use_temp_dir=True,
        )
        processor.push_to_hub(
            repo_id=f"Bearnardd/{pytorch_dump_path}",
            commit_message="Add image processor",
            use_temp_dir=True,
        )
if __name__ == "__main__":
UpperCAmelCase_ : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--pytorch_model_path",
default=None,
type=str,
required=True,
help="Path to EfficientFormer pytorch checkpoint.",
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The json file for EfficientFormer model config.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument("--push_to_hub", action="store_true", help="Push model and image processor to the hub")
parser.add_argument(
"--no-push_to_hub",
dest="push_to_hub",
action="store_false",
help="Do not push model and image processor to the hub",
)
parser.set_defaults(push_to_hub=True)
UpperCAmelCase_ : str = parser.parse_args()
convert_efficientformer_checkpoint(
checkpoint_path=args.pytorch_model_path,
efficientformer_config_file=args.config_file,
pytorch_dump_path=args.pytorch_dump_path,
push_to_hub=args.push_to_hub,
)
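

# --- Usage sketch (added for illustration; not part of the original script). ---
# Example invocation; the file names below are placeholders for your local files:
#   python convert_efficientformer_checkpoint.py \
#     --pytorch_model_path ./efficientformer_l1_300d.pth \
#     --config_file ./efficientformer_l1_config.json \
#     --pytorch_dump_path ./efficientformer-l1-300 --no-push_to_hub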
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
pytestmark = pytest.mark.integration


@require_faiss
class IndexableDatasetTest(TestCase):
    def _create_dummy_dataset(self):
        dset = Dataset.from_dict({"filename": ["my_name-train" + "_" + str(x) for x in np.arange(30).tolist()]})
        return dset

    def test_add_faiss_index(self):
        import faiss

        dset = self._create_dummy_dataset()
        dset = dset.map(
            lambda ex, i: {"vecs": i * np.ones(5, dtype=np.float32)}, with_indices=True, keep_in_memory=True
        )
        dset = dset.add_faiss_index("vecs", batch_size=100, metric_type=faiss.METRIC_INNER_PRODUCT)
        scores, examples = dset.get_nearest_examples("vecs", np.ones(5, dtype=np.float32))
        self.assertEqual(examples["filename"][0], "my_name-train_29")
        dset.drop_index("vecs")

    def test_add_faiss_index_from_external_arrays(self):
        import faiss

        dset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1),
            index_name="vecs",
            batch_size=100,
            metric_type=faiss.METRIC_INNER_PRODUCT,
        )
        scores, examples = dset.get_nearest_examples("vecs", np.ones(5, dtype=np.float32))
        self.assertEqual(examples["filename"][0], "my_name-train_29")

    def test_serialization(self):
        import faiss

        dset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1),
            index_name="vecs",
            metric_type=faiss.METRIC_INNER_PRODUCT,
        )

        # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
        # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
        # see https://bugs.python.org/issue14243 and
        # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
            dset.save_faiss_index("vecs", tmp_file.name)
            dset.load_faiss_index("vecs2", tmp_file.name)
        os.unlink(tmp_file.name)

        scores, examples = dset.get_nearest_examples("vecs2", np.ones(5, dtype=np.float32))
        self.assertEqual(examples["filename"][0], "my_name-train_29")

    def test_drop_index(self):
        dset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1), index_name="vecs"
        )
        dset.drop_index("vecs")
        self.assertRaises(MissingIndex, partial(dset.get_nearest_examples, "vecs2", np.ones(5, dtype=np.float32)))

    def test_add_elasticsearch_index(self):
        from elasticsearch import Elasticsearch

        dset = self._create_dummy_dataset()
        with patch("elasticsearch.Elasticsearch.search") as mocked_search, patch(
            "elasticsearch.client.IndicesClient.create"
        ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk") as mocked_bulk:
            mocked_index_create.return_value = {"acknowledged": True}
            mocked_bulk.return_value([(True, None)] * 30)
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 29}]}}
            es_client = Elasticsearch()

            dset.add_elasticsearch_index("filename", es_client=es_client)
            scores, examples = dset.get_nearest_examples("filename", "my_name-train_29")
            self.assertEqual(examples["filename"][0], "my_name-train_29")
@require_faiss
class FaissIndexTest(TestCase):
    def test_flat_ip(self):
        import faiss

        index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)

        # add vectors
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsNotNone(index.faiss_index)
        self.assertEqual(index.faiss_index.ntotal, 5)
        index.add_vectors(np.zeros((5, 5), dtype=np.float32))
        self.assertEqual(index.faiss_index.ntotal, 10)

        # single query
        query = np.zeros(5, dtype=np.float32)
        query[1] = 1
        scores, indices = index.search(query)
        self.assertRaises(ValueError, index.search, query.reshape(-1, 1))
        self.assertGreater(scores[0], 0)
        self.assertEqual(indices[0], 1)

        # batched queries
        queries = np.eye(5, dtype=np.float32)[::-1]
        total_scores, total_indices = index.search_batch(queries)
        self.assertRaises(ValueError, index.search_batch, queries[0])
        best_scores = [scores[0] for scores in total_scores]
        best_indices = [indices[0] for indices in total_indices]
        self.assertGreater(np.min(best_scores), 0)
        self.assertListEqual([4, 3, 2, 1, 0], best_indices)

    def test_factory(self):
        import faiss

        index = FaissIndex(string_factory="Flat")
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexFlat)
        index = FaissIndex(string_factory="LSH")
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexLSH)
        with self.assertRaises(ValueError):
            _ = FaissIndex(string_factory="Flat", custom_index=faiss.IndexFlat(5))

    def test_custom(self):
        import faiss

        custom_index = faiss.IndexFlat(5)
        index = FaissIndex(custom_index=custom_index)
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexFlat)

    def test_serialization(self):
        import faiss

        index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
        index.add_vectors(np.eye(5, dtype=np.float32))

        # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
        # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
        # see https://bugs.python.org/issue14243 and
        # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
            index.save(tmp_file.name)
            index = FaissIndex.load(tmp_file.name)
        os.unlink(tmp_file.name)

        query = np.zeros(5, dtype=np.float32)
        query[1] = 1
        scores, indices = index.search(query)
        self.assertGreater(scores[0], 0)
        self.assertEqual(indices[0], 1)
@require_faiss
def test_serialization_fs(mockfs):
    import faiss

    index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
    index.add_vectors(np.eye(5, dtype=np.float32))

    index_name = "index.faiss"
    path = f"mock://{index_name}"
    index.save(path, storage_options=mockfs.storage_options)
    index = FaissIndex.load(path, storage_options=mockfs.storage_options)

    query = np.zeros(5, dtype=np.float32)
    query[1] = 1
    scores, indices = index.search(query)
    assert scores[0] > 0
    assert indices[0] == 1
@require_elasticsearch
class ElasticSearchIndexTest(TestCase):
    def test_elasticsearch(self):
        from elasticsearch import Elasticsearch

        with patch("elasticsearch.Elasticsearch.search") as mocked_search, patch(
            "elasticsearch.client.IndicesClient.create"
        ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk") as mocked_bulk:
            es_client = Elasticsearch()
            mocked_index_create.return_value = {"acknowledged": True}
            index = ElasticSearchIndex(es_client=es_client)
            mocked_bulk.return_value([(True, None)] * 3)
            index.add_documents(["foo", "bar", "foobar"])

            # single query
            query = "foo"
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
            scores, indices = index.search(query)
            self.assertEqual(scores[0], 1)
            self.assertEqual(indices[0], 0)

            # single query with timeout
            query = "foo"
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
            scores, indices = index.search(query, request_timeout=30)
            self.assertEqual(scores[0], 1)
            self.assertEqual(indices[0], 0)

            # batched queries
            queries = ["foo", "bar", "foobar"]
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
            total_scores, total_indices = index.search_batch(queries)
            best_scores = [scores[0] for scores in total_scores]
            best_indices = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(best_scores), 0)
            self.assertListEqual([1, 1, 1], best_indices)

            # batched queries with timeout
            queries = ["foo", "bar", "foobar"]
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
            total_scores, total_indices = index.search_batch(queries, request_timeout=30)
            best_scores = [scores[0] for scores in total_scores]
            best_indices = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(best_scores), 0)
            self.assertListEqual([1, 1, 1], best_indices)
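

# --- Usage sketch (added for illustration; not part of the original tests). ---
# Build a small inner-product index and query it; requires `faiss` installed.
if __name__ == "__main__":
    import faiss

    index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
    index.add_vectors(np.eye(5, dtype=np.float32))
    scores, indices = index.search(np.array([0.0, 1.0, 0.0, 0.0, 0.0], dtype=np.float32))
    print(scores[0], indices[0])  # best match is row 1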
def solution(n: int = 600851475143) -> int:
    """Return the largest prime factor of n (Project Euler problem 3)."""
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError("Parameter n must be int or castable to int.")
    if n <= 0:
        raise ValueError("Parameter n must be greater than or equal to one.")
    ans = 1
    i = 2
    while i * i <= n:
        while n % i == 0:
            ans = i
            n //= i
        i += 1
    if n > 1:
        ans = n
    return int(ans)


if __name__ == "__main__":
    print(f"{solution() = }")
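
# --- Worked example (added for illustration). ---
# 13195 = 5 * 7 * 13 * 29, so its largest prime factor is 29.
assert solution(13195) == 29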
import gc
import unittest
from parameterized import parameterized
from diffusers import FlaxUNet2DConditionModel
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
@slow
@require_flax
class UpperCAmelCase_ ( unittest.TestCase ):
def __UpperCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase ):
return f"gaussian_noise_s={seed}_shape={'_'.join([str(_lowerCAmelCase ) for s in shape] )}.npy"
def __UpperCAmelCase ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def __UpperCAmelCase ( self , _lowerCAmelCase=0 , _lowerCAmelCase=(4, 4, 64, 64) , _lowerCAmelCase=False ):
UpperCAmelCase__ : Optional[Any] = jnp.bfloataa if fpaa else jnp.floataa
UpperCAmelCase__ : Union[str, Any] = jnp.array(load_hf_numpy(self.get_file_format(_lowerCAmelCase , _lowerCAmelCase ) ) , dtype=_lowerCAmelCase )
return image
def __UpperCAmelCase ( self , _lowerCAmelCase=False , _lowerCAmelCase="CompVis/stable-diffusion-v1-4" ):
UpperCAmelCase__ : int = jnp.bfloataa if fpaa else jnp.floataa
UpperCAmelCase__ : Optional[Any] = """bf16""" if fpaa else None
UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = FlaxUNetaDConditionModel.from_pretrained(
_lowerCAmelCase , subfolder="""unet""" , dtype=_lowerCAmelCase , revision=_lowerCAmelCase )
return model, params
def __UpperCAmelCase ( self , _lowerCAmelCase=0 , _lowerCAmelCase=(4, 77, 768) , _lowerCAmelCase=False ):
UpperCAmelCase__ : Optional[int] = jnp.bfloataa if fpaa else jnp.floataa
UpperCAmelCase__ : Optional[int] = jnp.array(load_hf_numpy(self.get_file_format(_lowerCAmelCase , _lowerCAmelCase ) ) , dtype=_lowerCAmelCase )
return hidden_states
@parameterized.expand(
[
# fmt: off
[83, 4, [-0.2_3_2_3, -0.1_3_0_4, 0.0_8_1_3, -0.3_0_9_3, -0.0_9_1_9, -0.1_5_7_1, -0.1_1_2_5, -0.5_8_0_6]],
[17, 0.5_5, [-0.0_8_3_1, -0.2_4_4_3, 0.0_9_0_1, -0.0_9_1_9, 0.3_3_9_6, 0.0_1_0_3, -0.3_7_4_3, 0.0_7_0_1]],
[8, 0.8_9, [-0.4_8_6_3, 0.0_8_5_9, 0.0_8_7_5, -0.1_6_5_8, 0.9_1_9_9, -0.0_1_1_4, 0.4_8_3_9, 0.4_6_3_9]],
[3, 1000, [-0.5_6_4_9, 0.2_4_0_2, -0.5_5_1_8, 0.1_2_4_8, 1.1_3_2_8, -0.2_4_4_3, -0.0_3_2_5, -1.0_0_7_8]],
# fmt: on
] )
    def test_compvis_sd_v1_4_flax_vs_torch_fp16(self, seed, timestep, expected_slice):
        model, params = self.get_unet_model(model_id="CompVis/stable-diffusion-v1-4", fp16=True)
        latents = self.get_latents(seed, fp16=True)
        encoder_hidden_states = self.get_encoder_hidden_states(seed, fp16=True)
        sample = model.apply(
            {"params": params}, latents, jnp.array(timestep, dtype=jnp.int32), encoder_hidden_states=encoder_hidden_states, ).sample
        assert sample.shape == latents.shape
        output_slice = jnp.asarray(jax.device_get(sample[-1, -2:, -2:, :2].flatten()), dtype=jnp.float32)
        expected_output_slice = jnp.array(expected_slice, dtype=jnp.float32)
        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, in the same hardware
        assert jnp.allclose(output_slice, expected_output_slice, atol=1e-2)
@parameterized.expand(
[
# fmt: off
        [83, 4, [0.1514, 0.0807, 0.1624, 0.1016, -0.1896, 0.0263, 0.0677, 0.2310]],
        [17, 0.55, [0.1164, -0.0216, 0.0170, 0.1589, -0.3120, 0.1005, -0.0581, -0.1458]],
        [8, 0.89, [-0.1758, -0.0169, 0.1004, -0.1411, 0.1312, 0.1103, -0.1996, 0.2139]],
        [3, 1000, [0.1214, 0.0352, -0.0731, -0.1562, -0.0994, -0.0906, -0.2340, -0.0539]],
# fmt: on
] )
    def test_stabilityai_sd_v2_flax_vs_torch_fp16(self, seed, timestep, expected_slice):
        model, params = self.get_unet_model(model_id="stabilityai/stable-diffusion-2", fp16=True)
        latents = self.get_latents(seed, shape=(4, 4, 96, 96), fp16=True)
        encoder_hidden_states = self.get_encoder_hidden_states(seed, shape=(4, 77, 1024), fp16=True)
        sample = model.apply(
            {"params": params}, latents, jnp.array(timestep, dtype=jnp.int32), encoder_hidden_states=encoder_hidden_states, ).sample
        assert sample.shape == latents.shape
        output_slice = jnp.asarray(jax.device_get(sample[-1, -2:, -2:, :2].flatten()), dtype=jnp.float32)
        expected_output_slice = jnp.array(expected_slice, dtype=jnp.float32)
        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
        assert jnp.allclose(output_slice, expected_output_slice, atol=1e-2)
| 79
| 1
|
import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def retrieve(class_prompt, class_data_dir, num_class_images):
    """Download `num_class_images` regularization images matching `class_prompt` from LAION-400M."""
    factor = 1.5
    num_images = int(factor * num_class_images)
    client = ClipClient(
        url="https://knn.laion.ai/knn-service", indice_name="laion_400m", num_images=num_images, aesthetic_weight=0.1)
    os.makedirs(f"{class_data_dir}/images", exist_ok=True)
    if len(list(Path(f"{class_data_dir}/images").iterdir())) >= num_class_images:
        return
    while True:
        class_images = client.query(text=class_prompt)
        if len(class_images) >= factor * num_class_images or num_images > 1e4:
            break
        else:
            num_images = int(factor * num_images)
            client = ClipClient(
                url="https://knn.laion.ai/knn-service", indice_name="laion_400m", num_images=num_images, aesthetic_weight=0.1, )
    count = 0
    total = 0
    pbar = tqdm(desc="downloading real regularization images", total=num_class_images)
    with open(f"{class_data_dir}/caption.txt", "w") as f1, open(f"{class_data_dir}/urls.txt", "w") as f2, open(
        f"{class_data_dir}/images.txt", "w") as f3:
        while total < num_class_images:
            images = class_images[count]
            count += 1
            try:
                img = requests.get(images["url"])
                if img.status_code == 200:
                    _ = Image.open(BytesIO(img.content))  # raises if the payload is not a valid image
                    with open(f"{class_data_dir}/images/{total}.jpg", "wb") as f:
                        f.write(img.content)
                    f1.write(images["caption"] + "\n")
                    f2.write(images["url"] + "\n")
                    f3.write(f"{class_data_dir}/images/{total}.jpg" + "\n")
                    total += 1
                    pbar.update(1)
                else:
                    continue
            except Exception:
                continue
    return
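# Hypothetical invocation (added sketch; flag names match the parser below):
#   python retrieve_images.py --class_prompt "a photo of a dog" \
#       --class_data_dir ./real_reg/dog --num_class_images 200
# The script keeps re-querying LAION-400M with a growing `num_images` budget
# until at least 1.5 * num_class_images candidate results are returned.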
def parse_args():
    parser = argparse.ArgumentParser("", add_help=False)
    parser.add_argument("--class_prompt", help="text prompt to retrieve images", required=True, type=str)
    parser.add_argument("--class_data_dir", help="path to save images", required=True, type=str)
    parser.add_argument("--num_class_images", help="number of images to download", default=200, type=int)
    return parser.parse_args()
if __name__ == "__main__":
    args = parse_args()
retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
| 715
|
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import f1_score, matthews_corrcoef
import datasets
_CITATION = '\\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n'
_DESCRIPTION = '\\nGLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n'
_KWARGS_DESCRIPTION = '\nCompute GLUE evaluation metric associated to each GLUE dataset.\nArgs:\n predictions: list of predictions to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\nReturns: depending on the GLUE subset, one or several of:\n "accuracy": Accuracy\n "f1": F1 score\n "pearson": Pearson Correlation\n "spearmanr": Spearman Correlation\n "matthews_correlation": Matthew Correlation\nExamples:\n\n >>> glue_metric = datasets.load_metric(\'glue\', \'sst2\') # \'sst2\' or any of ["mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0}\n\n >>> glue_metric = datasets.load_metric(\'glue\', \'mrpc\') # \'mrpc\' or \'qqp\'\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0, \'f1\': 1.0}\n\n >>> glue_metric = datasets.load_metric(\'glue\', \'stsb\')\n >>> references = [0., 1., 2., 3., 4., 5.]\n >>> predictions = [0., 1., 2., 3., 4., 5.]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print({"pearson": round(results["pearson"], 2), "spearmanr": round(results["spearmanr"], 2)})\n {\'pearson\': 1.0, \'spearmanr\': 1.0}\n\n >>> glue_metric = datasets.load_metric(\'glue\', \'cola\')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'matthews_correlation\': 1.0}\n'
def simple_accuracy(preds, labels):
    return float((preds == labels).mean())


def acc_and_f1(preds, labels):
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds))
    return {
        "accuracy": acc,
        "f1": f1,
    }


def pearson_and_spearman(preds, labels):
    pearson_corr = float(pearsonr(preds, labels)[0])
    spearman_corr = float(spearmanr(preds, labels)[0])
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
    }
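# Quick sanity check of the helpers above (added; numpy arrays assumed for the
# first two, since accuracy relies on elementwise `==` and `.mean()`):
#   simple_accuracy(np.array([0, 1, 1]), np.array([0, 1, 0]))  -> 0.666...
#   acc_and_f1(np.array([0, 1]), np.array([0, 1]))             -> {"accuracy": 1.0, "f1": 1.0}
#   pearson_and_spearman([0.0, 1.0, 2.0], [0.0, 1.0, 2.0])     -> {"pearson": 1.0, "spearmanr": 1.0}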
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class Glue(datasets.Metric):
    """GLUE metric, dispatching on the dataset configuration name."""

    def _info(self):
if self.config_name not in [
"sst2",
"mnli",
"mnli_mismatched",
"mnli_matched",
"cola",
"stsb",
"mrpc",
"qqp",
"qnli",
"rte",
"wnli",
"hans",
]:
raise KeyError(
'''You should supply a configuration name selected in '''
'''["sst2", "mnli", "mnli_mismatched", "mnli_matched", '''
'''"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]''' )
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
'''predictions''': datasets.Value('''int64''' if self.config_name != '''stsb''' else '''float32''' ),
'''references''': datasets.Value('''int64''' if self.config_name != '''stsb''' else '''float32''' ),
} ) ,codebase_urls=[] ,reference_urls=[] ,format='''numpy''' ,)
    def _compute(self, predictions, references):
        """Compute the metric(s) associated with the configured GLUE subset."""
        if self.config_name == "cola":
            return {"matthews_correlation": matthews_corrcoef(references, predictions)}
        elif self.config_name == "stsb":
            return pearson_and_spearman(predictions, references)
        elif self.config_name in ["mrpc", "qqp"]:
            return acc_and_f1(predictions, references)
        elif self.config_name in ["sst2", "mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]:
            return {"accuracy": simple_accuracy(predictions, references)}
else:
raise KeyError(
'''You should supply a configuration name selected in '''
'''["sst2", "mnli", "mnli_mismatched", "mnli_matched", '''
'''"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]''' )
| 90
| 0
|
'''simple docstring'''
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine") -> torch.Tensor:
    if alpha_transform_type == "cosine":
        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2
    elif alpha_transform_type == "exp":
        def alpha_bar_fn(t):
            return math.exp(t * -12.0)
    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")
    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
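# Minimal usage sketch (added): a 10-step cosine schedule. Each beta equals
# 1 - alpha_bar(t_{i+1}) / alpha_bar(t_i), capped at max_beta, so all values
# lie in (0, max_beta] and the tensor has shape (num_diffusion_timesteps,).
#   betas = betas_for_alpha_bar(10)
#   assert betas.shape == (10,) and float(betas.max()) <= 0.999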
class HeunDiscreteScheduler(SchedulerMixin, ConfigMixin):
    """Heun's second-order discrete scheduler (a Karras-style sampler)."""

    _compatibles = [e.name for e in KarrasDiffusionSchedulers]
    order = 2

    @register_to_config
    def __init__(self, num_train_timesteps=1000, beta_start=0.00085, beta_end=0.012, beta_schedule="linear", trained_betas=None, prediction_type="epsilon", use_karras_sigmas=False, clip_sample=False, clip_sample_range=1.0, timestep_spacing="linspace", steps_offset=0) -> None:
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas, dtype=torch.float32)
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps, alpha_transform_type="cosine")
        elif beta_schedule == "exp":
            self.betas = betas_for_alpha_bar(num_train_timesteps, alpha_transform_type="exp")
        else:
            raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}")
        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)
        # set all values
        self.set_timesteps(num_train_timesteps, None, num_train_timesteps)
        self.use_karras_sigmas = use_karras_sigmas
    def index_for_timestep(self, timestep, schedule_timesteps=None):
        if schedule_timesteps is None:
            schedule_timesteps = self.timesteps
        indices = (schedule_timesteps == timestep).nonzero()
        # The sigma index that is taken for the **very** first `step`
        # is always the second index (or the last index if there is only 1)
        # This way we can ensure we don't accidentally skip a sigma in
        # case we start in the middle of the denoising schedule (e.g. for image-to-image)
        if len(self._index_counter) == 0:
            pos = 1 if len(indices) > 1 else 0
        else:
            timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
            pos = self._index_counter[timestep_int]
        return indices[pos].item()

    @property
    def init_noise_sigma(self):
        # standard deviation of the initial noise distribution
        if self.config.timestep_spacing in ["linspace", "trailing"]:
            return self.sigmas.max()
        return (self.sigmas.max() ** 2 + 1) ** 0.5

    def scale_model_input(self, sample, timestep) -> torch.FloatTensor:
        step_index = self.index_for_timestep(timestep)
        sigma = self.sigmas[step_index]
        sample = sample / ((sigma**2 + 1) ** 0.5)
        return sample
    def set_timesteps(self, num_inference_steps, device=None, num_train_timesteps=None) -> None:
        self.num_inference_steps = num_inference_steps
        num_train_timesteps = num_train_timesteps or self.config.num_train_timesteps
        # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
        if self.config.timestep_spacing == "linspace":
            timesteps = np.linspace(0, num_train_timesteps - 1, num_inference_steps, dtype=float)[::-1].copy()
        elif self.config.timestep_spacing == "leading":
            step_ratio = num_train_timesteps // self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(float)
            timesteps += self.config.steps_offset
        elif self.config.timestep_spacing == "trailing":
            step_ratio = num_train_timesteps / self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(num_train_timesteps, 0, -step_ratio)).round().copy().astype(float)
            timesteps -= 1
        else:
            raise ValueError(
                f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'.")
        sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5)
        log_sigmas = np.log(sigmas)
        sigmas = np.interp(timesteps, np.arange(0, len(sigmas)), sigmas)
        if self.config.use_karras_sigmas:
            sigmas = self._convert_to_karras(in_sigmas=sigmas, num_inference_steps=self.num_inference_steps)
            timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas])
        sigmas = np.concatenate([sigmas, [0.0]]).astype(np.float32)
        sigmas = torch.from_numpy(sigmas).to(device=device)
        self.sigmas = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2), sigmas[-1:]])
        timesteps = torch.from_numpy(timesteps)
        timesteps = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2)])
        if str(device).startswith("mps"):
            # mps does not support float64
            self.timesteps = timesteps.to(device, dtype=torch.float32)
        else:
            self.timesteps = timesteps.to(device=device)
        # empty dt and derivative
        self.prev_derivative = None
        self.dt = None
        # for exp beta schedules, such as the one for `pipeline_shap_e.py`
        # we need an index counter
        self._index_counter = defaultdict(int)
    def _sigma_to_t(self, sigma, log_sigmas):
        # get log sigma
        log_sigma = np.log(sigma)
        # get distribution
        dists = log_sigma - log_sigmas[:, np.newaxis]
        # get sigmas range
        low_idx = np.cumsum((dists >= 0), axis=0).argmax(axis=0).clip(max=log_sigmas.shape[0] - 2)
        high_idx = low_idx + 1
        low = log_sigmas[low_idx]
        high = log_sigmas[high_idx]
        # interpolate sigmas
        w = (low - log_sigma) / (low - high)
        w = np.clip(w, 0, 1)
        # transform interpolation to time range
        t = (1 - w) * low_idx + w * high_idx
        t = t.reshape(sigma.shape)
        return t
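    # In other words (added note): t is recovered by piecewise-linear
    # interpolation in log-sigma space against the training table:
    #   w = (log_sigmas[low_idx] - log(sigma)) / (log_sigmas[low_idx] - log_sigmas[high_idx])
    #   t = (1 - w) * low_idx + w * high_idx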
    def _convert_to_karras(self, in_sigmas, num_inference_steps) -> torch.FloatTensor:
        sigma_min = in_sigmas[-1].item()
        sigma_max = in_sigmas[0].item()
        rho = 7.0  # 7.0 is the value used in the paper
        ramp = np.linspace(0, 1, num_inference_steps)
        min_inv_rho = sigma_min ** (1 / rho)
        max_inv_rho = sigma_max ** (1 / rho)
        sigmas = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
        return sigmas
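    # Reference (added note): this is the noise schedule of Karras et al. (2022),
    # "Elucidating the Design Space of Diffusion-Based Generative Models", eq. (5):
    #   sigma_i = (sigma_max^(1/rho) + i/(N-1) * (sigma_min^(1/rho) - sigma_max^(1/rho)))^rho
    # with rho = 7, which spaces the sigmas densely near sigma_min.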
    @property
    def state_in_first_order(self):
        return self.dt is None
    def step(self, model_output, timestep, sample, return_dict=True) -> Union[SchedulerOutput, Tuple]:
        step_index = self.index_for_timestep(timestep)
        # advance index counter by 1
        timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
        self._index_counter[timestep_int] += 1
        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
            sigma_next = self.sigmas[step_index + 1]
        else:
            # 2nd order / Heun's method
            sigma = self.sigmas[step_index - 1]
            sigma_next = self.sigmas[step_index]
        # currently only gamma=0 is supported. This usually works best anyways.
        # We can support gamma in the future but then need to scale the timestep before
        # passing it to the model which requires a change in API
        gamma = 0
        sigma_hat = sigma * (gamma + 1)  # Note: sigma_hat == sigma for now
        # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
        if self.config.prediction_type == "epsilon":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_next
            pred_original_sample = sample - sigma_input * model_output
        elif self.config.prediction_type == "v_prediction":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_next
            pred_original_sample = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
                sample / (sigma_input**2 + 1)
            )
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or `v_prediction`")
        if self.config.clip_sample:
            pred_original_sample = pred_original_sample.clamp(
                -self.config.clip_sample_range, self.config.clip_sample_range)
        if self.state_in_first_order:
            # 2. Convert to an ODE derivative for 1st order
            derivative = (sample - pred_original_sample) / sigma_hat
            # 3. delta timestep
            dt = sigma_next - sigma_hat
            # store for 2nd order step
            self.prev_derivative = derivative
            self.dt = dt
            self.sample = sample
        else:
            # 2. 2nd order / Heun's method
            derivative = (sample - pred_original_sample) / sigma_next
            derivative = (self.prev_derivative + derivative) / 2
            # 3. take prev timestep & sample
            dt = self.dt
            sample = self.sample
            # free dt and derivative
            # Note, this puts the scheduler in "first order mode"
            self.prev_derivative = None
            self.dt = None
            self.sample = None
        prev_sample = sample + derivative * dt
        if not return_dict:
            return (prev_sample,)
        return SchedulerOutput(prev_sample=prev_sample)
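    # Usage sketch (added; hypothetical driver loop). Heun consumes two model
    # evaluations per output sample; `set_timesteps` duplicates the interior
    # timesteps accordingly, so a plain loop over `scheduler.timesteps` works:
    #   scheduler.set_timesteps(num_inference_steps, device=device)
    #   for t in scheduler.timesteps:
    #       model_input = scheduler.scale_model_input(latents, t)
    #       noise_pred = unet(model_input, t).sample
    #       latents = scheduler.step(noise_pred, t, latents).prev_sample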
    def add_noise(self, original_samples, noise, timesteps) -> torch.FloatTensor:
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype)
        if original_samples.device.type == "mps" and torch.is_floating_point(timesteps):
            # mps does not support float64
            schedule_timesteps = self.timesteps.to(original_samples.device, dtype=torch.float32)
            timesteps = timesteps.to(original_samples.device, dtype=torch.float32)
        else:
            schedule_timesteps = self.timesteps.to(original_samples.device)
            timesteps = timesteps.to(original_samples.device)
        step_indices = [self.index_for_timestep(t, schedule_timesteps) for t in timesteps]
        sigma = sigmas[step_indices].flatten()
        while len(sigma.shape) < len(original_samples.shape):
            sigma = sigma.unsqueeze(-1)
        noisy_samples = original_samples + noise * sigma
        return noisy_samples

    def __len__(self) -> int:
        return self.config.num_train_timesteps
| 368
|
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_perceiver import PerceiverImageProcessor
logger = logging.get_logger(__name__)
class PerceiverFeatureExtractor(PerceiverImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class PerceiverFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use PerceiverImageProcessor instead.", FutureWarning, )
        super().__init__(*args, **kwargs)
| 368
| 1
|
"""simple docstring"""
import argparse
import torch
from transformers import (
    SpeechT5Config,
    SpeechT5FeatureExtractor,
    SpeechT5ForSpeechToSpeech,
    SpeechT5ForSpeechToText,
    SpeechT5ForTextToSpeech,
    SpeechT5Processor,
    SpeechT5Tokenizer,
    logging,
)
from transformers.tokenization_utils import AddedToken
logging.set_verbosity_info()
logger = logging.get_logger('''transformers.models.speecht5''')
MAPPING_SPEECH_ENCODER_PRENET = {
"""speech_encoder_prenet.layer_norm""": """speecht5.encoder.prenet.feature_projection.layer_norm""",
"""speech_encoder_prenet.post_extract_proj""": """speecht5.encoder.prenet.feature_projection.projection""",
"""speech_encoder_prenet.pos_conv.0""": """speecht5.encoder.prenet.pos_conv_embed.conv""",
"""speech_encoder_prenet.mask_emb""": """speecht5.encoder.prenet.masked_spec_embed""",
}
MAPPING_TEXT_ENCODER_PRENET = {
"""text_encoder_prenet.encoder_prenet.0""": """speecht5.encoder.prenet.embed_tokens""",
"""text_encoder_prenet.encoder_prenet.1.alpha""": """speecht5.encoder.prenet.encode_positions.alpha""",
}
MAPPING_SPEECH_DECODER_PRENET = {
"""speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0""": """speecht5.decoder.prenet.layers.0""",
"""speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0""": """speecht5.decoder.prenet.layers.1""",
"""speech_decoder_prenet.decoder_prenet.0.1""": """speecht5.decoder.prenet.final_layer""",
"""speech_decoder_prenet.decoder_prenet.1.alpha""": """speecht5.decoder.prenet.encode_positions.alpha""",
"""speech_decoder_prenet.spkembs_layer.0""": """speecht5.decoder.prenet.speaker_embeds_layer""",
}
MAPPING_SPEECH_DECODER_POSTNET = {
"""speech_decoder_postnet.feat_out""": """speech_decoder_postnet.feat_out""",
"""speech_decoder_postnet.prob_out""": """speech_decoder_postnet.prob_out""",
"""speech_decoder_postnet.postnet.postnet.0.0""": """speech_decoder_postnet.layers.0.conv""",
"""speech_decoder_postnet.postnet.postnet.0.1""": """speech_decoder_postnet.layers.0.batch_norm""",
"""speech_decoder_postnet.postnet.postnet.1.0""": """speech_decoder_postnet.layers.1.conv""",
"""speech_decoder_postnet.postnet.postnet.1.1""": """speech_decoder_postnet.layers.1.batch_norm""",
"""speech_decoder_postnet.postnet.postnet.2.0""": """speech_decoder_postnet.layers.2.conv""",
"""speech_decoder_postnet.postnet.postnet.2.1""": """speech_decoder_postnet.layers.2.batch_norm""",
"""speech_decoder_postnet.postnet.postnet.3.0""": """speech_decoder_postnet.layers.3.conv""",
"""speech_decoder_postnet.postnet.postnet.3.1""": """speech_decoder_postnet.layers.3.batch_norm""",
"""speech_decoder_postnet.postnet.postnet.4.0""": """speech_decoder_postnet.layers.4.conv""",
"""speech_decoder_postnet.postnet.postnet.4.1""": """speech_decoder_postnet.layers.4.batch_norm""",
}
MAPPING_TEXT_DECODER_PRENET = {
"""text_decoder_prenet.embed_tokens""": """speecht5.decoder.prenet.embed_tokens""",
}
MAPPING_TEXT_DECODER_POSTNET = {
"""text_decoder_postnet.output_projection""": """text_decoder_postnet.lm_head""",
}
MAPPING_ENCODER = {
"""encoder.layers.*.self_attn.k_proj""": """speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj""",
"""encoder.layers.*.self_attn.v_proj""": """speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj""",
"""encoder.layers.*.self_attn.q_proj""": """speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj""",
"""encoder.layers.*.self_attn.out_proj""": """speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj""",
"""encoder.layers.*.self_attn_layer_norm""": """speecht5.encoder.wrapped_encoder.layers.*.layer_norm""",
"""encoder.layers.*.fc1""": """speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense""",
"""encoder.layers.*.fc2""": """speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense""",
"""encoder.layers.*.final_layer_norm""": """speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """speecht5.encoder.wrapped_encoder.layer_norm""",
"""encoder.pos_emb.pe_k""": """speecht5.encoder.wrapped_encoder.embed_positions.pe_k""",
}
MAPPING_DECODER = {
"""decoder.layers.*.self_attn.k_proj""": """speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj""",
"""decoder.layers.*.self_attn.v_proj""": """speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj""",
"""decoder.layers.*.self_attn.q_proj""": """speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj""",
"""decoder.layers.*.self_attn.out_proj""": """speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj""",
"""decoder.layers.*.self_attn_layer_norm""": """speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm""",
"""decoder.layers.*.encoder_attn.k_proj""": """speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj""",
"""decoder.layers.*.encoder_attn.v_proj""": """speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj""",
"""decoder.layers.*.encoder_attn.q_proj""": """speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj""",
"""decoder.layers.*.encoder_attn.out_proj""": """speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj""",
"""decoder.layers.*.encoder_attn_layer_norm""": """speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm""",
"""decoder.layers.*.fc1""": """speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense""",
"""decoder.layers.*.fc2""": """speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense""",
"""decoder.layers.*.final_layer_norm""": """speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm""",
}
MAPPING_S2T = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_TEXT_DECODER_PRENET,
**MAPPING_TEXT_DECODER_POSTNET,
}
MAPPING_T2S = {
**MAPPING_TEXT_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
MAPPING_S2S = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
TOP_LEVEL_KEYS = []
IGNORE_KEYS = [
"""encoder.version""",
"""encoder.layers.*.norm_k.weight""",
"""encoder.layers.*.norm_k.bias""",
"""decoder.version""",
"""decoder.layers.*.norm_k.weight""",
"""decoder.layers.*.norm_k.bias""",
"""decoder.pos_emb.pe_k""",
"""speech_encoder_prenet.embed_positions._float_tensor""",
"""text_decoder_prenet.embed_positions._float_tensor""",
]
IGNORE_KEYS_S2T = IGNORE_KEYS + [
"""encoder.proj""",
"""text_encoder_prenet.*""",
"""speech_decoder_prenet.*""",
"""speech_decoder_postnet.*""",
]
IGNORE_KEYS_T2S = IGNORE_KEYS + [
"""encoder.proj""",
"""speech_encoder_prenet.*""",
"""text_decoder_prenet.*""",
"""text_decoder_postnet.*""",
]
IGNORE_KEYS_S2S = IGNORE_KEYS + [
"""encoder.proj""",
"""text_encoder_prenet.*""",
"""text_decoder_prenet.*""",
"""text_decoder_postnet.*""",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    """Walk `key` through the HF model and copy `value` into the matching parameter."""
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}")
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    else:
        hf_pointer.data = value
    logger.info(f"{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.")
def should_ignore(name, ignore_keys):
    """Return True if `name` matches any pattern in `ignore_keys` (supports '.*' suffix and '.*.' infix)."""
    for key in ignore_keys:
        if key.endswith(".*"):
            if name.startswith(key[:-1]):
                return True
        elif ".*." in key:
            prefix, suffix = key.split(".*.")
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False
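# Example (added): should_ignore("encoder.layers.3.norm_k.weight",
# ["encoder.layers.*.norm_k.weight"]) returns True, because the prefix
# "encoder.layers" and suffix "norm_k.weight" around ".*." both occur in the name.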
def recursively_load_weights(fairseq_dict, hf_model, task):
    unused_weights = []
    if task == "s2t":
        feature_encoder = hf_model.speecht5.encoder.prenet.feature_encoder
        MAPPING = MAPPING_S2T
        IGNORE_KEYS = IGNORE_KEYS_S2T
    elif task == "t2s":
        feature_encoder = None
        MAPPING = MAPPING_T2S
        IGNORE_KEYS = IGNORE_KEYS_T2S
    elif task == "s2s":
        feature_encoder = hf_model.speecht5.encoder.prenet.feature_encoder
        MAPPING = MAPPING_S2S
        IGNORE_KEYS = IGNORE_KEYS_S2S
    else:
        raise ValueError(f"Unsupported task: {task}")
    for name, value in fairseq_dict.items():
        if should_ignore(name, IGNORE_KEYS):
            logger.info(f"{name} was ignored")
            continue
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_encoder, unused_weights, hf_model.config.feat_extract_norm == "group", )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                # mapped_key = "speecht5." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if "*" in key:
                    prefix, suffix = key.split(".*.")
                    if prefix in name and suffix in name:
                        key = suffix
                # if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                if key in name:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "running_mean" in name:
                        weight_type = "running_mean"
                    elif "running_var" in name:
                        weight_type = "running_var"
                    elif "num_batches_tracked" in name:
                        weight_type = "num_batches_tracked"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                    continue
            if not is_used:
                unused_weights.append(name)
    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])
    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.")
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.")
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.")
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.")
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_speecht5_checkpoint(task, checkpoint_path, pytorch_dump_folder_path, config_path=None, vocab_path=None, repo_id=None, ):
    """Copy/paste/tweak a fairseq SpeechT5 checkpoint into a transformers model."""
    if config_path is not None:
        config = SpeechT5Config.from_pretrained(config_path)
    else:
        config = SpeechT5Config()
    if task == "s2t":
        config.max_length = config.max_text_positions
        model = SpeechT5ForSpeechToText(config)
    elif task == "t2s":
        config.max_speech_positions = 1876
        config.max_text_positions = 600
        config.max_length = config.max_speech_positions
        model = SpeechT5ForTextToSpeech(config)
    elif task == "s2s":
        config.max_speech_positions = 1876
        config.max_length = config.max_speech_positions
        model = SpeechT5ForSpeechToSpeech(config)
    else:
        raise ValueError(f"Unknown task name: {task}")
    if vocab_path:
        tokenizer = SpeechT5Tokenizer(vocab_path, model_max_length=config.max_text_positions)
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken("<mask>", lstrip=True, rstrip=False)
        tokenizer.mask_token = mask_token
        tokenizer.add_special_tokens({"mask_token": mask_token})
        tokenizer.add_tokens(["<ctc_blank>"])
    feature_extractor = SpeechT5FeatureExtractor()
    processor = SpeechT5Processor(tokenizer=tokenizer, feature_extractor=feature_extractor)
    processor.save_pretrained(pytorch_dump_folder_path)
    fairseq_checkpoint = torch.load(checkpoint_path)
    recursively_load_weights(fairseq_checkpoint["model"], model, task)
    model.save_pretrained(pytorch_dump_folder_path)
    if repo_id:
        print("Pushing to the hub...")
        processor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--task''',
default='''s2t''',
type=str,
help='''Type of the SpeechT5 model you\'d like to convert. Should be one of \'s2t\', \'t2s\', \'s2s\'.''',
)
parser.add_argument('''--checkpoint_path''', required=True, default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--vocab_path''', default=None, type=str, help='''Path to SentencePiece model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, default=None, type=str, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.'''
)
    args = parser.parse_args()
    convert_speecht5_checkpoint(
args.task,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.vocab_path,
args.push_to_hub,
)
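# Hypothetical invocation (added sketch; flags defined above):
#   python convert_speecht5_checkpoint.py --task t2s \
#       --checkpoint_path speecht5_tts.pt --vocab_path spm_char.model \
#       --pytorch_dump_folder_path ./speecht5_tts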
| 716
|
"""simple docstring"""
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import UperNetForSemanticSegmentation
from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UperNetModelTester:
    def __init__(self, parent, batch_size=13, image_size=32, num_channels=3, num_stages=4, hidden_sizes=[10, 20, 30, 40], depths=[2, 2, 3, 2], is_training=True, use_labels=True, intermediate_size=37, hidden_act="gelu", type_sequence_label_size=10, initializer_range=0.02, out_features=["stage2", "stage3", "stage4"], num_labels=3, scope=None, ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.num_labels = num_labels
        self.scope = scope
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels

    def get_backbone_config(self):
        return ConvNextConfig(
            num_channels=self.num_channels, num_stages=self.num_stages, hidden_sizes=self.hidden_sizes, depths=self.depths, is_training=self.is_training, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, out_features=self.out_features, )

    def get_config(self):
        return UperNetConfig(
            backbone_config=self.get_backbone_config(), hidden_size=512, pool_scales=[1, 2, 3, 6], use_auxiliary_head=True, auxiliary_loss_weight=0.4, auxiliary_in_channels=40, auxiliary_channels=256, auxiliary_num_convs=1, auxiliary_concat_input=False, loss_ignore_index=255, num_labels=self.num_labels, )
    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels):
        model = UperNetForSemanticSegmentation(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class UperNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (UperNetForSemanticSegmentation,) if is_torch_available() else ()
    pipeline_model_mapping = {"image-segmentation": UperNetForSemanticSegmentation} if is_torch_available() else {}
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_torchscript = False
    has_attentions = False
    def setUp(self):
        self.model_tester = UperNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=UperNetConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)
@unittest.skip(reason="""UperNet does not use inputs_embeds""" )
def UpperCamelCase_ ( self: Any ):
'''simple docstring'''
pass
@unittest.skip(reason="""UperNet does not support input and output embeddings""" )
def UpperCamelCase_ ( self: Dict ):
'''simple docstring'''
pass
@unittest.skip(reason="""UperNet does not have a base model""" )
def UpperCamelCase_ ( self: Optional[Any] ):
'''simple docstring'''
pass
@unittest.skip(reason="""UperNet does not have a base model""" )
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(reason="""UperNet has some layers using `add_module` which doesn't work well with `nn.DataParallel`""" )
def UpperCamelCase_ ( self: List[str] ):
'''simple docstring'''
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def UpperCamelCase_ ( self: Dict ):
'''simple docstring'''
pass
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)
            # ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]), [self.model_tester.image_size // 4, self.model_tester.image_size // 4], )
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config)
        configs_no_init.backbone_config = _config_zero_init(configs_no_init.backbone_config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", )

    @unittest.skip(reason="UperNet does not have tied weights")
    def test_tied_model_weights_key_ignore(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = UperNetForSemanticSegmentation.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    filepath = hf_hub_download(
        repo_id="hf-internal-testing/fixtures_ade20k", repo_type="dataset", filename="ADE_val_00000001.jpg")
    image = Image.open(filepath).convert("RGB")
    return image
@require_torch
@require_vision
@slow
class UperNetModelIntegrationTest(unittest.TestCase):
    def test_inference_swin_backbone(self):
        processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-swin-tiny")
        model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-swin-tiny").to(torch_device)
        image = prepare_img()
        inputs = processor(images=image, return_tensors="pt").to(torch_device)
        with torch.no_grad():
            outputs = model(**inputs)
        expected_shape = torch.Size((1, model.config.num_labels, 512, 512))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor(
            [[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4))

    def test_inference_convnext_backbone(self):
        processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny")
        model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny").to(torch_device)
        image = prepare_img()
        inputs = processor(images=image, return_tensors="pt").to(torch_device)
        with torch.no_grad():
            outputs = model(**inputs)
        expected_shape = torch.Size((1, model.config.num_labels, 512, 512))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor(
            [[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4))
| 28
| 0
|
'''simple docstring'''
import math
def perfect_square(num: int) -> bool:
    """Check if a number is a perfect square using math.sqrt.

    >>> perfect_square(9)
    True
    >>> perfect_square(10)
    False
    """
    return math.sqrt(num) * math.sqrt(num) == num


def perfect_square_binary_search(n: int) -> bool:
    """Check if a number is a perfect square using binary search.

    >>> perfect_square_binary_search(16)
    True
    >>> perfect_square_binary_search(26)
    False
    """
    left = 0
    right = n
    while left <= right:
        mid = (left + right) // 2
        if mid**2 == n:
            return True
        elif mid**2 > n:
            right = mid - 1
        else:
            left = mid + 1
    return False
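# Caveat (added): the math.sqrt variant compares floats, so it can misclassify
# very large integers once sqrt() loses precision; the binary-search variant
# uses exact integer arithmetic and is safe at any size.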
if __name__ == "__main__":
import doctest
doctest.testmod()
| 13
|
'''simple docstring'''
import sys
from collections import defaultdict
class Heap:
    """Min-heap keyed by tentative distance, with a position index per vertex."""

    def __init__(self) -> None:
        self.node_position = []

    def get_position(self, vertex):
        return self.node_position[vertex]

    def set_position(self, vertex, pos):
        self.node_position[vertex] = pos
    def top_to_bottom(self, heap, start, size, positions):
        if start > size // 2 - 1:
            return
        else:
            if 2 * start + 2 >= size:
                smallest_child = 2 * start + 1
            else:
                if heap[2 * start + 1] < heap[2 * start + 2]:
                    smallest_child = 2 * start + 1
                else:
                    smallest_child = 2 * start + 2
            if heap[smallest_child] < heap[start]:
                temp, temp_pos = heap[smallest_child], positions[smallest_child]
                heap[smallest_child], positions[smallest_child] = (
                    heap[start],
                    positions[start],
                )
                heap[start], positions[start] = temp, temp_pos
                temp = self.get_position(positions[smallest_child])
                self.set_position(
                    positions[smallest_child], self.get_position(positions[start]))
                self.set_position(positions[start], temp)
                self.top_to_bottom(heap, smallest_child, size, positions)
    def bottom_to_top(self, val, index, heap, position):
        temp = position[index]
        while index != 0:
            parent = int((index - 2) / 2) if index % 2 == 0 else int((index - 1) / 2)
            if val < heap[parent]:
                heap[index] = heap[parent]
                position[index] = position[parent]
                self.set_position(position[parent], index)
            else:
                heap[index] = val
                position[index] = temp
                self.set_position(temp, index)
                break
            index = parent
        else:
            heap[0] = val
            position[0] = temp
            self.set_position(temp, 0)
    def heapify(self, heap, positions):
        start = len(heap) // 2 - 1
        for i in range(start, -1, -1):
            self.top_to_bottom(heap, i, len(heap), positions)

    def delete_minimum(self, heap, positions):
        temp = positions[0]
        heap[0] = sys.maxsize
        self.top_to_bottom(heap, 0, len(heap), positions)
        return temp
def prisms_algorithm(adjacency_list):
    heap = Heap()
    visited = [0] * len(adjacency_list)
    nbr_tv = [-1] * len(adjacency_list)  # Neighboring Tree Vertex of selected vertex
    # Minimum Distance of explored vertex with neighboring vertex of partial tree
    # formed in graph
    distance_tv = []  # Heap of Distance of vertices from their neighboring vertex
    positions = []
    for vertex in range(len(adjacency_list)):
        distance_tv.append(sys.maxsize)
        positions.append(vertex)
        heap.node_position.append(vertex)
    tree_edges = []
    visited[0] = 1
    distance_tv[0] = sys.maxsize
    for neighbor, distance in adjacency_list[0]:
        nbr_tv[neighbor] = 0
        distance_tv[neighbor] = distance
    heap.heapify(distance_tv, positions)
    for _ in range(1, len(adjacency_list)):
        vertex = heap.delete_minimum(distance_tv, positions)
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex))
            visited[vertex] = 1
            for neighbor, distance in adjacency_list[vertex]:
                if (
                    visited[neighbor] == 0
                    and distance < distance_tv[heap.get_position(neighbor)]
                ):
                    distance_tv[heap.get_position(neighbor)] = distance
                    heap.bottom_to_top(
                        distance, heap.get_position(neighbor), distance_tv, positions)
                    nbr_tv[neighbor] = vertex
    return tree_edges
if __name__ == "__main__": # pragma: no cover
# < --------- Prims Algorithm --------- >
    edges_number = int(input("Enter number of edges: ").strip())
    adjacency_list = defaultdict(list)
    for _ in range(edges_number):
        edge = [int(x) for x in input().strip().split()]
adjacency_list[edge[0]].append([edge[1], edge[2]])
adjacency_list[edge[1]].append([edge[0], edge[2]])
print(prisms_algorithm(adjacency_list))
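# Example session (added; hypothetical input): entering 3 edges
#   0 1 4
#   1 2 2
#   0 2 5
# builds an undirected weighted triangle, and prisms_algorithm returns the
# minimum spanning tree edges, here [(0, 1), (1, 2)] with total weight 6.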
| 13
| 1
|
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import numpy as np
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForMaskedImageModeling,
HfArgumentParser,
Trainer,
TrainingArguments,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')
require_version('datasets>=1.8.0', 'To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt')
MODEL_CONFIG_CLASSES = list(MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class DataTrainingArguments:
    dataset_name: Optional[str] = field(
        default="cifar10", metadata={"help": "Name of a dataset from the datasets package"})
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."})
    image_column_name: Optional[str] = field(
        default=None, metadata={"help": "The column name of the images in the files. If not set, will try to use 'image' or 'img'."}, )
    train_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the training data."})
    validation_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the validation data."})
    train_val_split: Optional[float] = field(
        default=0.15, metadata={"help": "Percent to split off of train for validation."})
    mask_patch_size: int = field(default=32, metadata={"help": "The size of the square patches to use for masking."})
    mask_ratio: float = field(
        default=0.6, metadata={"help": "Percentage of patches to mask."}, )
    max_train_samples: Optional[int] = field(
        default=None, metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        }, )
    max_eval_samples: Optional[int] = field(
        default=None, metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        }, )

    def __post_init__(self):
        data_files = {}
        if self.train_dir is not None:
            data_files["train"] = self.train_dir
        if self.validation_dir is not None:
            data_files["validation"] = self.validation_dir
        self.data_files = data_files if data_files else None
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        default=None, metadata={
            "help": (
                "The model checkpoint for weights initialization. Can be a local path to a pytorch_model.bin or a "
                "checkpoint identifier on the hub. "
                "Don't set if you want to train a model from scratch."
            )
        }, )
    model_type: Optional[str] = field(
        default=None, metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)}, )
    config_name_or_path: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"})
    config_overrides: Optional[str] = field(
        default=None, metadata={
            "help": (
                "Override some existing default config settings when a model is trained from scratch. Example: "
                "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
            )
        }, )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store (cache) the pretrained models/datasets downloaded from the hub"}, )
    model_revision: str = field(
        default="main", metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."}, )
    image_processor_name: str = field(default=None, metadata={"help": "Name or path of preprocessor config."})
    use_auth_token: bool = field(
        default=False, metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        }, )
    image_size: Optional[int] = field(
        default=None, metadata={
            "help": (
                "The size (resolution) of each image. If not specified, will use `image_size` of the configuration."
            )
        }, )
    patch_size: Optional[int] = field(
        default=None, metadata={
            "help": (
                "The size (resolution) of each patch. If not specified, will use `patch_size` of the configuration."
            )
        }, )
    encoder_stride: Optional[int] = field(
        default=None, metadata={"help": "Stride to use for the encoder."}, )
class MaskGenerator:
    """
    A class to generate boolean masks for the pretraining task.

    A mask is a 1D tensor of shape (model_patch_size**2,) where the value is either 0 or 1,
    where 1 indicates "masked".
    """

    def __init__(self, input_size=192, mask_patch_size=32, model_patch_size=4, mask_ratio=0.6):
        self.input_size = input_size
        self.mask_patch_size = mask_patch_size
        self.model_patch_size = model_patch_size
        self.mask_ratio = mask_ratio

        if self.input_size % self.mask_patch_size != 0:
            raise ValueError("Input size must be divisible by mask patch size")
        if self.mask_patch_size % self.model_patch_size != 0:
            raise ValueError("Mask patch size must be divisible by model patch size")

        self.rand_size = self.input_size // self.mask_patch_size
        self.scale = self.mask_patch_size // self.model_patch_size

        self.token_count = self.rand_size**2
        self.mask_count = int(np.ceil(self.token_count * self.mask_ratio))

    def __call__(self):
        mask_idx = np.random.permutation(self.token_count)[: self.mask_count]
        mask = np.zeros(self.token_count, dtype=int)
        mask[mask_idx] = 1

        mask = mask.reshape((self.rand_size, self.rand_size))
        mask = mask.repeat(self.scale, axis=0).repeat(self.scale, axis=1)

        return torch.tensor(mask.flatten())
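

def _mask_generator_sketch():
    # Illustrative sanity check, not part of the original script: with the
    # defaults above, rand_size = 192 // 32 = 6 and scale = 32 // 4 = 8, so the
    # flattened mask covers a 48 x 48 grid of model patches, and
    # mask_count = ceil(36 * 0.6) = 22 coarse patches become 8 x 8 blocks of ones.
    mask = MaskGenerator(input_size=192, mask_patch_size=32, model_patch_size=4, mask_ratio=0.6)()
    assert mask.shape == (48 * 48,)
    assert int(mask.sum()) == 22 * 8 * 8
    return mask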
def collate_fn(examples):
    pixel_values = torch.stack([example["pixel_values"] for example in examples])
    mask = torch.stack([example["mask"] for example in examples])
    return {"pixel_values": pixel_values, "bool_masked_pos": mask}
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_mim", model_args, data_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")
    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Initialize our dataset.
    ds = load_dataset(
        data_args.dataset_name,
        data_args.dataset_config_name,
        data_files=data_args.data_files,
        cache_dir=model_args.cache_dir,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if "validation" in ds.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split, float) and data_args.train_val_split > 0.0:
        split = ds["train"].train_test_split(data_args.train_val_split)
        ds["train"] = split["train"]
        ds["validation"] = split["test"]
    # Create config
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config_kwargs = {
        "cache_dir": model_args.cache_dir,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.config_name_or_path:
        config = AutoConfig.from_pretrained(model_args.config_name_or_path, **config_kwargs)
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning("You are instantiating a new config instance from scratch.")
        if model_args.config_overrides is not None:
            logger.info(f"Overriding config: {model_args.config_overrides}")
            config.update_from_string(model_args.config_overrides)
            logger.info(f"New config: {config}")

    # make sure the decoder_type is "simmim" (only relevant for BEiT)
    if hasattr(config, "decoder_type"):
        config.decoder_type = "simmim"

    # adapt config
    model_args.image_size = model_args.image_size if model_args.image_size is not None else config.image_size
    model_args.patch_size = model_args.patch_size if model_args.patch_size is not None else config.patch_size
    model_args.encoder_stride = (
        model_args.encoder_stride if model_args.encoder_stride is not None else config.encoder_stride
    )

    config.update(
        {
            "image_size": model_args.image_size,
            "patch_size": model_args.patch_size,
            "encoder_stride": model_args.encoder_stride,
        }
    )
    # create image processor
    if model_args.image_processor_name:
        image_processor = AutoImageProcessor.from_pretrained(model_args.image_processor_name, **config_kwargs)
    elif model_args.model_name_or_path:
        image_processor = AutoImageProcessor.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        IMAGE_PROCESSOR_TYPES = {
            conf.model_type: image_processor_class for conf, image_processor_class in IMAGE_PROCESSOR_MAPPING.items()
        }
        image_processor = IMAGE_PROCESSOR_TYPES[model_args.model_type]()

    # create model
    if model_args.model_name_or_path:
        model = AutoModelForMaskedImageModeling.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool(".ckpt" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
            revision=model_args.model_revision,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        logger.info("Training new model from scratch")
        model = AutoModelForMaskedImageModeling.from_config(config)
    if training_args.do_train:
        column_names = ds["train"].column_names
    else:
        column_names = ds["validation"].column_names

    if data_args.image_column_name is not None:
        image_column_name = data_args.image_column_name
    elif "image" in column_names:
        image_column_name = "image"
    elif "img" in column_names:
        image_column_name = "img"
    else:
        image_column_name = column_names[0]

    # transformations as done in original SimMIM paper
    # source: https://github.com/microsoft/SimMIM/blob/main/data/data_simmim.py
    transforms = Compose(
        [
            Lambda(lambda img: img.convert("RGB") if img.mode != "RGB" else img),
            RandomResizedCrop(model_args.image_size, scale=(0.67, 1.0), ratio=(3.0 / 4.0, 4.0 / 3.0)),
            RandomHorizontalFlip(),
            ToTensor(),
            Normalize(mean=image_processor.image_mean, std=image_processor.image_std),
        ]
    )

    # create mask generator
    mask_generator = MaskGenerator(
        input_size=model_args.image_size,
        mask_patch_size=data_args.mask_patch_size,
        model_patch_size=model_args.patch_size,
        mask_ratio=data_args.mask_ratio,
    )

    def preprocess_images(examples):
        """Preprocess a batch of images by applying transforms + creating a corresponding mask, indicating
        which patches to mask."""
        examples["pixel_values"] = [transforms(image) for image in examples[image_column_name]]
        examples["mask"] = [mask_generator() for i in range(len(examples[image_column_name]))]
        return examples
    if training_args.do_train:
        if "train" not in ds:
            raise ValueError("--do_train requires a train dataset")
        if data_args.max_train_samples is not None:
            ds["train"] = ds["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
        # Set the training transforms
        ds["train"].set_transform(preprocess_images)

    if training_args.do_eval:
        if "validation" not in ds:
            raise ValueError("--do_eval requires a validation dataset")
        if data_args.max_eval_samples is not None:
            ds["validation"] = (
                ds["validation"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        ds["validation"].set_transform(preprocess_images)

    # Initialize our trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=ds["train"] if training_args.do_train else None,
        eval_dataset=ds["validation"] if training_args.do_eval else None,
        tokenizer=image_processor,
        data_collator=collate_fn,
    )
    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics("train", train_result.metrics)
        trainer.save_metrics("train", train_result.metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    # Write model card and (optionally) push to hub
    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "masked-image-modeling",
        "dataset": data_args.dataset_name,
        "tags": ["masked-image-modeling"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
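

# Example invocation (illustrative flags only; the names come from the dataclasses above):
#   python run_mim.py --model_type vit --dataset_name cifar10 \
#       --output_dir ./simmim-outputs --do_train --do_eval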
if __name__ == "__main__":
main()
| 358
|
"""
All-construct problem: enumerate every way a target string can be built by
concatenating words from a word bank (words may be reused), using bottom-up
dynamic programming over string prefixes.
"""
from __future__ import annotations
def all_construct(target: str, word_bank: list[str] | None = None) -> list[list[str]]:
    """
    returns the list containing all the possible
    combinations a string (target) can be constructed from
    the given list of substrings (word_bank)
    """
    word_bank = word_bank or []
    # create a table
    table_size: int = len(target) + 1

    table: list[list[list[str]]] = []
    for _ in range(table_size):
        table.append([])

    # seed value
    table[0] = [[]]  # because empty string has empty combination

    # iterate through the indices
    for i in range(table_size):
        # condition
        if table[i] != []:
            for word in word_bank:
                # slice condition
                if target[i : i + len(word)] == word:
                    new_combinations: list[list[str]] = [
                        [word, *way] for way in table[i]
                    ]
                    # adds the word to every combination the current position holds
                    # now, push that combination to the table[i + len(word)]
                    table[i + len(word)] += new_combinations

    # combinations are in reverse order so reverse for better output
    for combination in table[len(target)]:
        combination.reverse()

    return table[len(target)]
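

# Quick sanity check (illustrative, beyond the original demo): 'aa' over the
# bank ['a', 'aa'] has exactly two constructions, ['a', 'a'] and ['aa'].
assert len(all_construct('aa', ['a', 'aa'])) == 2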
if __name__ == "__main__":
print(all_construct('jwajalapa', ['jwa', 'j', 'w', 'a', 'la', 'lapa']))
print(all_construct('rajamati', ['s', 'raj', 'amat', 'raja', 'ma', 'i', 't']))
print(
all_construct(
'hexagonosaurus',
['h', 'ex', 'hex', 'ag', 'ago', 'ru', 'auru', 'rus', 'go', 'no', 'o', 's'],
)
)
| 358
| 1
|
import json
import os
import unittest
from transformers import DebertaTokenizer, DebertaTokenizerFast
from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class DebertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = DebertaTokenizer
    test_rust_tokenizer = True
    rust_tokenizer_class = DebertaTokenizerFast

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "[UNK]",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "[UNK]"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()
        text = "lower newer"
        bpe_tokens = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_token_type_ids(self):
        tokenizer = self.get_tokenizer()
        tokd = tokenizer("Hello", "World")
        expected_token_type_ids = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
        self.assertListEqual(tokd["token_type_ids"], expected_token_type_ids)

    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("microsoft/deberta-base")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_text_from_decode = tokenizer.encode(
            "sequence builders", add_special_tokens=True, add_prefix_space=False
        )
        encoded_pair_from_decode = tokenizer.encode(
            "sequence builders", "multi-sequence build", add_special_tokens=True, add_prefix_space=False
        )

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode

    @slow
    def test_tokenizer_integration(self):
        tokenizer_classes = [self.tokenizer_class]
        if self.test_rust_tokenizer:
            tokenizer_classes.append(self.rust_tokenizer_class)

        for tokenizer_class in tokenizer_classes:
            tokenizer = tokenizer_class.from_pretrained("microsoft/deberta-base")

            sequences = [
                "ALBERT: A Lite BERT for Self-supervised Learning of Language Representations",
                "ALBERT incorporates two parameter reduction techniques",
                "The first one is a factorized embedding parameterization. By decomposing the large vocabulary"
                " embedding matrix into two small matrices, we separate the size of the hidden layers from the size of"
                " vocabulary embedding.",
            ]

            encoding = tokenizer(sequences, padding=True)
            decoded_sequences = [tokenizer.decode(seq, skip_special_tokens=True) for seq in encoding["input_ids"]]

            # fmt: off
            expected_encoding = {
                "input_ids": [
                    [1, 2118, 11126, 565, 35, 83, 25191, 163, 18854, 13, 12156, 12, 16101, 25376, 13807, 9, 22205, 27893, 1635, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [1, 2118, 11126, 565, 24536, 80, 43797, 4878, 7373, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [1, 133, 78, 65, 16, 10, 3724, 1538, 33183, 11303, 43797, 1938, 4, 870, 24165, 29105, 5, 739, 32644, 33183, 11303, 36173, 88, 80, 650, 7821, 45940, 6, 52, 2559, 5, 1836, 9, 5, 7397, 13171, 31, 5, 1836, 9, 32644, 33183, 11303, 4, 2]
                ],
                "token_type_ids": [
                    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
                ],
                "attention_mask": [
                    [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
                ]
            }
            # fmt: on

            expected_decoded_sequences = [
                "ALBERT: A Lite BERT for Self-supervised Learning of Language Representations",
                "ALBERT incorporates two parameter reduction techniques",
                "The first one is a factorized embedding parameterization. By decomposing the large vocabulary"
                " embedding matrix into two small matrices, we separate the size of the hidden layers from the size of"
                " vocabulary embedding.",
            ]

            self.assertDictEqual(encoding.data, expected_encoding)

            for expected, decoded in zip(expected_decoded_sequences, decoded_sequences):
                self.assertEqual(expected, decoded)
| 371
|
import gc
import unittest
from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline
from transformers.pipelines import PipelineException
from transformers.testing_utils import (
is_pipeline_test,
is_torch_available,
nested_simplify,
require_tf,
require_torch,
require_torch_gpu,
slow,
)
from .test_pipelines_common import ANY
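

def _fill_mask_usage_sketch():
    # Illustrative only, not part of the original suite: the smallest call
    # pattern the tests below exercise. Each prediction is a dict with
    # "sequence", "score", "token" and "token_str" keys.
    unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", top_k=2)
    return unmasker("My name is <mask>")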
@is_pipeline_test
class FillMaskPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_MASKED_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_MASKED_LM_MAPPING

    def tearDown(self):
        super().tearDown()
        # clean-up as much as possible GPU memory occupied by PyTorch
        gc.collect()
        if is_torch_available():
            import torch

            torch.cuda.empty_cache()
    @require_tf
    def test_small_model_tf(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", top_k=2, framework="tf")
        outputs = unmasker("My name is <mask>")
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {"sequence": "My name is grouped", "score": 2.1e-05, "token": 38015, "token_str": " grouped"},
                {"sequence": "My name is accuser", "score": 2.1e-05, "token": 25506, "token_str": " accuser"},
            ],
        )

        outputs = unmasker("The largest city in France is <mask>")
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {
                    "sequence": "The largest city in France is grouped",
                    "score": 2.1e-05,
                    "token": 38015,
                    "token_str": " grouped",
                },
                {
                    "sequence": "The largest city in France is accuser",
                    "score": 2.1e-05,
                    "token": 25506,
                    "token_str": " accuser",
                },
            ],
        )

        outputs = unmasker("My name is <mask>", targets=[" Patrick", " Clara", " Teven"], top_k=3)
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {"sequence": "My name is Clara", "score": 2e-05, "token": 13606, "token_str": " Clara"},
                {"sequence": "My name is Patrick", "score": 2e-05, "token": 3499, "token_str": " Patrick"},
                {"sequence": "My name is Te", "score": 1.9e-05, "token": 2941, "token_str": " Te"},
            ],
        )
    @require_torch
    def test_small_model_pt(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", top_k=2, framework="pt")
        outputs = unmasker("My name is <mask>")
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {"sequence": "My name is Maul", "score": 2.2e-05, "token": 35676, "token_str": " Maul"},
                {"sequence": "My name isELS", "score": 2.2e-05, "token": 16416, "token_str": "ELS"},
            ],
        )

        outputs = unmasker("The largest city in France is <mask>")
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {
                    "sequence": "The largest city in France is Maul",
                    "score": 2.2e-05,
                    "token": 35676,
                    "token_str": " Maul",
                },
                {"sequence": "The largest city in France isELS", "score": 2.2e-05, "token": 16416, "token_str": "ELS"},
            ],
        )

        outputs = unmasker("My name is <mask>", targets=[" Patrick", " Clara", " Teven"], top_k=3)
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {"sequence": "My name is Patrick", "score": 2.1e-05, "token": 3499, "token_str": " Patrick"},
                {"sequence": "My name is Te", "score": 2e-05, "token": 2941, "token_str": " Te"},
                {"sequence": "My name is Clara", "score": 2e-05, "token": 13606, "token_str": " Clara"},
            ],
        )

        outputs = unmasker("My name is <mask> <mask>", top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                [
                    {
                        "score": 2.2e-05,
                        "token": 35676,
                        "token_str": " Maul",
                        "sequence": "<s>My name is Maul<mask></s>",
                    },
                    {"score": 2.2e-05, "token": 16416, "token_str": "ELS", "sequence": "<s>My name isELS<mask></s>"},
                ],
                [
                    {
                        "score": 2.2e-05,
                        "token": 35676,
                        "token_str": " Maul",
                        "sequence": "<s>My name is<mask> Maul</s>",
                    },
                    {"score": 2.2e-05, "token": 16416, "token_str": "ELS", "sequence": "<s>My name is<mask>ELS</s>"},
                ],
            ],
        )
    @require_torch_gpu
    def test_fp16_casting(self):
        pipe = pipeline("fill-mask", model="hf-internal-testing/tiny-random-distilbert", device=0, framework="pt")

        # convert model to fp16
        pipe.model.half()

        response = pipe("Paris is the [MASK] of France.")
        # We actually don't care about the result, we just want to make sure
        # it works, meaning the float16 tensor got casted back to float32
        # for postprocessing.
        self.assertIsInstance(response, list)
    @slow
    @require_torch
    def test_large_model_pt(self):
        unmasker = pipeline(task="fill-mask", model="distilroberta-base", top_k=2, framework="pt")
        self.run_large_test(unmasker)

    @slow
    @require_tf
    def test_large_model_tf(self):
        unmasker = pipeline(task="fill-mask", model="distilroberta-base", top_k=2, framework="tf")
        self.run_large_test(unmasker)
    def run_large_test(self, unmasker):
        outputs = unmasker("My name is <mask>")
        self.assertEqual(
            nested_simplify(outputs),
            [
                {"sequence": "My name is John", "score": 0.008, "token": 610, "token_str": " John"},
                {"sequence": "My name is Chris", "score": 0.007, "token": 1573, "token_str": " Chris"},
            ],
        )

        outputs = unmasker("The largest city in France is <mask>")
        self.assertEqual(
            nested_simplify(outputs),
            [
                {
                    "sequence": "The largest city in France is Paris",
                    "score": 0.251,
                    "token": 2201,
                    "token_str": " Paris",
                },
                {
                    "sequence": "The largest city in France is Lyon",
                    "score": 0.214,
                    "token": 12790,
                    "token_str": " Lyon",
                },
            ],
        )

        outputs = unmasker("My name is <mask>", targets=[" Patrick", " Clara", " Teven"], top_k=3)
        self.assertEqual(
            nested_simplify(outputs),
            [
                {"sequence": "My name is Patrick", "score": 0.005, "token": 3499, "token_str": " Patrick"},
                {"sequence": "My name is Clara", "score": 0.000, "token": 13606, "token_str": " Clara"},
                {"sequence": "My name is Te", "score": 0.000, "token": 2941, "token_str": " Te"},
            ],
        )
    @require_torch
    def test_model_no_pad_pt(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", framework="pt")
        unmasker.tokenizer.pad_token_id = None
        unmasker.tokenizer.pad_token = None
        self.run_pipeline_test(unmasker, [])

    @require_tf
    def test_model_no_pad_tf(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", framework="tf")
        unmasker.tokenizer.pad_token_id = None
        unmasker.tokenizer.pad_token = None
        self.run_pipeline_test(unmasker, [])
    def get_test_pipeline(self, model, tokenizer, processor):
        if tokenizer is None or tokenizer.mask_token_id is None:
            self.skipTest("The provided tokenizer has no mask token, (probably reformer or wav2vec2)")

        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        examples = [
            f"This is another {tokenizer.mask_token} test",
        ]
        return fill_masker, examples
    def run_pipeline_test(self, fill_masker, examples):
        tokenizer = fill_masker.tokenizer
        model = fill_masker.model

        outputs = fill_masker(
            f"This is a {tokenizer.mask_token}",
        )
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )

        outputs = fill_masker([f"This is a {tokenizer.mask_token}"])
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )

        outputs = fill_masker([f"This is a {tokenizer.mask_token}", f"Another {tokenizer.mask_token} great test."])
        self.assertEqual(
            outputs,
            [
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
            ],
        )

        with self.assertRaises(ValueError):
            fill_masker([None])
        # No mask_token is not supported
        with self.assertRaises(PipelineException):
            fill_masker("This is")

        self.run_test_top_k(model, tokenizer)
        self.run_test_targets(model, tokenizer)
        self.run_test_top_k_targets(model, tokenizer)
        self.fill_mask_with_duplicate_targets_and_top_k(model, tokenizer)
        self.fill_mask_with_multiple_masks(model, tokenizer)
    def run_test_targets(self, model, tokenizer):
        vocab = tokenizer.get_vocab()
        targets = sorted(vocab.keys())[:2]
        # Pipeline argument
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer, targets=targets)
        outputs = fill_masker(f"This is a {tokenizer.mask_token}")
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )
        target_ids = {vocab[el] for el in targets}
        self.assertEqual({el["token"] for el in outputs}, target_ids)
        processed_targets = [tokenizer.decode([x]) for x in target_ids]
        self.assertEqual({el["token_str"] for el in outputs}, set(processed_targets))

        # Call argument
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=targets)
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )
        target_ids = {vocab[el] for el in targets}
        self.assertEqual({el["token"] for el in outputs}, target_ids)
        processed_targets = [tokenizer.decode([x]) for x in target_ids]
        self.assertEqual({el["token_str"] for el in outputs}, set(processed_targets))

        # Score equivalence
        outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=targets)
        tokens = [top_mask["token_str"] for top_mask in outputs]
        scores = [top_mask["score"] for top_mask in outputs]

        # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
        if set(tokens) == set(targets):
            unmasked_targets = fill_masker(f"This is a {tokenizer.mask_token}", targets=tokens)
            target_scores = [top_mask["score"] for top_mask in unmasked_targets]
            self.assertEqual(nested_simplify(scores), nested_simplify(target_scores))

        # Raises with invalid
        with self.assertRaises(ValueError):
            outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=[])
        # For some tokenizers, `""` is actually in the vocabulary and the expected error won't raised
        if "" not in tokenizer.get_vocab():
            with self.assertRaises(ValueError):
                outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=[""])
        with self.assertRaises(ValueError):
            outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets="")
    def run_test_top_k(self, model, tokenizer):
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer, top_k=2)
        outputs = fill_masker(f"This is a {tokenizer.mask_token}")
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )

        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        outputs2 = fill_masker(f"This is a {tokenizer.mask_token}", top_k=2)
        self.assertEqual(
            outputs2,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )
        self.assertEqual(nested_simplify(outputs), nested_simplify(outputs2))
    def run_test_top_k_targets(self, model, tokenizer):
        vocab = tokenizer.get_vocab()
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)

        # top_k=2, ntargets=3
        targets = sorted(vocab.keys())[:3]
        outputs = fill_masker(f"This is a {tokenizer.mask_token}", top_k=2, targets=targets)

        # If we use the most probably targets, and filter differently, we should still
        # have the same results
        targets2 = [el["token_str"] for el in sorted(outputs, key=lambda x: x["score"], reverse=True)]
        # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
        if set(targets2).issubset(targets):
            outputs2 = fill_masker(f"This is a {tokenizer.mask_token}", top_k=3, targets=targets2)
            # They should yield exactly the same result
            self.assertEqual(nested_simplify(outputs), nested_simplify(outputs2))
    def fill_mask_with_duplicate_targets_and_top_k(self, model, tokenizer):
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        vocab = tokenizer.get_vocab()
        # String duplicates + id duplicates
        targets = sorted(vocab.keys())[:3]
        targets = [targets[0], targets[1], targets[0], targets[2], targets[1]]
        outputs = fill_masker(f"My name is {tokenizer.mask_token}", targets=targets, top_k=10)

        # The target list contains duplicates, so we can't output more
        # than them
        self.assertEqual(len(outputs), 3)
    def fill_mask_with_multiple_masks(self, model, tokenizer):
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)

        outputs = fill_masker(
            f"This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}", top_k=2
        )
        self.assertEqual(
            outputs,
            [
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
            ],
        )
| 371
| 1
|
from ..utils import DummyObject, requires_backends
class __SCREAMING_SNAKE_CASE(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class __SCREAMING_SNAKE_CASE(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class __SCREAMING_SNAKE_CASE(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class __SCREAMING_SNAKE_CASE(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class __SCREAMING_SNAKE_CASE(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class __SCREAMING_SNAKE_CASE(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class __SCREAMING_SNAKE_CASE(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class __SCREAMING_SNAKE_CASE(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class __SCREAMING_SNAKE_CASE(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class __SCREAMING_SNAKE_CASE(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class __SCREAMING_SNAKE_CASE(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class __SCREAMING_SNAKE_CASE(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class __SCREAMING_SNAKE_CASE(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])
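

def _dummy_backend_sketch():
    # Illustrative only: when the flax backend is unavailable (the only case in
    # which a dummy module like this one is imported), instantiating any of the
    # placeholder classes above raises an ImportError naming the missing backend.
    try:
        __SCREAMING_SNAKE_CASE()
        return None
    except ImportError as err:
        return str(err)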
| 700
|
import unittest
from transformers import (
MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TextGenerationPipeline,
logging,
pipeline,
)
from transformers.testing_utils import (
CaptureLogger,
is_pipeline_test,
require_accelerate,
require_tf,
require_torch,
require_torch_gpu,
require_torch_or_tf,
)
from .test_pipelines_common import ANY
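

def _text_generation_usage_sketch():
    # Illustrative only, not part of the original suite: the canonical call
    # pattern exercised below. With do_sample=False the tiny checkpoint
    # produces deterministic text.
    text_generator = pipeline(task="text-generation", model="sshleifer/tiny-ctrl", framework="pt")
    return text_generator("This is a test", do_sample=False)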
@is_pipeline_test
@require_torch_or_tf
class TextGenerationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_CAUSAL_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_CAUSAL_LM_MAPPING
    @require_torch
    def test_small_model_pt(self):
        text_generator = pipeline(task="text-generation", model="sshleifer/tiny-ctrl", framework="pt")
        # Using `do_sample=False` to force deterministic output
        outputs = text_generator("This is a test", do_sample=False)
        self.assertEqual(
            outputs,
            [
                {
                    "generated_text": (
                        "This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope."
                        " oscope. FiliFili@@"
                    )
                }
            ],
        )

        outputs = text_generator(["This is a test", "This is a second test"])
        self.assertEqual(
            outputs,
            [
                [
                    {
                        "generated_text": (
                            "This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope."
                            " oscope. FiliFili@@"
                        )
                    }
                ],
                [
                    {
                        "generated_text": (
                            "This is a second test ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy"
                            " oscope. oscope. FiliFili@@"
                        )
                    }
                ],
            ],
        )

        outputs = text_generator("This is a test", do_sample=True, num_return_sequences=2, return_tensors=True)
        self.assertEqual(
            outputs,
            [
                {"generated_token_ids": ANY(list)},
                {"generated_token_ids": ANY(list)},
            ],
        )

        text_generator.tokenizer.pad_token_id = text_generator.model.config.eos_token_id
        text_generator.tokenizer.pad_token = "<pad>"
        outputs = text_generator(
            ["This is a test", "This is a second test"],
            do_sample=True,
            num_return_sequences=2,
            batch_size=2,
            return_tensors=True,
        )
        self.assertEqual(
            outputs,
            [
                [
                    {"generated_token_ids": ANY(list)},
                    {"generated_token_ids": ANY(list)},
                ],
                [
                    {"generated_token_ids": ANY(list)},
                    {"generated_token_ids": ANY(list)},
                ],
            ],
        )
    @require_tf
    def test_small_model_tf(self):
        text_generator = pipeline(task="text-generation", model="sshleifer/tiny-ctrl", framework="tf")

        # Using `do_sample=False` to force deterministic output
        outputs = text_generator("This is a test", do_sample=False)
        self.assertEqual(
            outputs,
            [
                {
                    "generated_text": (
                        "This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵"
                        " please,"
                    )
                }
            ],
        )

        outputs = text_generator(["This is a test", "This is a second test"], do_sample=False)
        self.assertEqual(
            outputs,
            [
                [
                    {
                        "generated_text": (
                            "This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵"
                            " please,"
                        )
                    }
                ],
                [
                    {
                        "generated_text": (
                            "This is a second test Chieftain Chieftain prefecture prefecture prefecture Cannes Cannes"
                            " Cannes 閲閲Cannes Cannes Cannes 攵 please,"
                        )
                    }
                ],
            ],
        )
    def get_test_pipeline(self, model, tokenizer, processor):
        text_generator = TextGenerationPipeline(model=model, tokenizer=tokenizer)
        return text_generator, ["This is a test", "Another test"]
    def test_stop_sequence_stopping_criteria(self):
        prompt = """Hello I believe in"""
        text_generator = pipeline("text-generation", model="hf-internal-testing/tiny-random-gpt2")
        output = text_generator(prompt)
        self.assertEqual(
            output,
            [{"generated_text": "Hello I believe in fe fe fe fe fe fe fe fe fe fe fe fe"}],
        )

        output = text_generator(prompt, stop_sequence=" fe")
        self.assertEqual(output, [{"generated_text": "Hello I believe in fe"}])
    def run_pipeline_test(self, text_generator, _):
        model = text_generator.model
        tokenizer = text_generator.tokenizer

        outputs = text_generator("This is a test")
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        self.assertTrue(outputs[0]["generated_text"].startswith("This is a test"))

        outputs = text_generator("This is a test", return_full_text=False)
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        self.assertNotIn("This is a test", outputs[0]["generated_text"])

        text_generator = pipeline(task="text-generation", model=model, tokenizer=tokenizer, return_full_text=False)
        outputs = text_generator("This is a test")
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        self.assertNotIn("This is a test", outputs[0]["generated_text"])

        outputs = text_generator("This is a test", return_full_text=True)
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        self.assertTrue(outputs[0]["generated_text"].startswith("This is a test"))

        outputs = text_generator(["This is great !", "Something else"], num_return_sequences=2, do_sample=True)
        self.assertEqual(
            outputs,
            [
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
            ],
        )

        if text_generator.tokenizer.pad_token is not None:
            outputs = text_generator(
                ["This is great !", "Something else"], num_return_sequences=2, batch_size=2, do_sample=True
            )
            self.assertEqual(
                outputs,
                [
                    [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                    [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                ],
            )

        with self.assertRaises(ValueError):
            outputs = text_generator("test", return_full_text=True, return_text=True)
        with self.assertRaises(ValueError):
            outputs = text_generator("test", return_full_text=True, return_tensors=True)
        with self.assertRaises(ValueError):
            outputs = text_generator("test", return_text=True, return_tensors=True)

        # Empty prompt is slightly special
        # it requires BOS token to exist.
        # Special case for Pegasus which will always append EOS so will
        # work even without BOS.
        if (
            text_generator.tokenizer.bos_token_id is not None
            or "Pegasus" in tokenizer.__class__.__name__
            or "Git" in model.__class__.__name__
        ):
            outputs = text_generator("")
            self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        else:
            with self.assertRaises((ValueError, AssertionError)):
                outputs = text_generator("")

        if text_generator.framework == "tf":
            # TF generation does not support max_new_tokens, and it's impossible
            # to control long generation with only max_length without
            # fancy calculation, dismissing tests for now.
            return

        # We don't care about infinite range models.
        # They already work.
        # Skip this test for XGLM, since it uses sinusoidal positional embeddings which are resized on-the-fly.
        EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS = ["RwkvForCausalLM", "XGLMForCausalLM", "GPTNeoXForCausalLM"]
        if (
            tokenizer.model_max_length < 10000
            and text_generator.model.__class__.__name__ not in EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS
        ):
            # Handling of large generations
            with self.assertRaises((RuntimeError, IndexError, ValueError, AssertionError)):
                text_generator("This is a test" * 500, max_new_tokens=20)

            outputs = text_generator("This is a test" * 500, handle_long_generation="hole", max_new_tokens=20)
            # Hole strategy cannot work
            with self.assertRaises(ValueError):
                text_generator(
                    "This is a test" * 500,
                    handle_long_generation="hole",
                    max_new_tokens=tokenizer.model_max_length + 10,
                )
    @require_torch
    @require_accelerate
    @require_torch_gpu
    def test_small_model_pt_bloom_accelerate(self):
        import torch

        # Classic `model_kwargs`
        pipe = pipeline(
            model="hf-internal-testing/tiny-random-bloom",
            model_kwargs={"device_map": "auto", "torch_dtype": torch.bfloat16},
        )
        self.assertEqual(pipe.model.device, torch.device(0))
        self.assertEqual(pipe.model.lm_head.weight.dtype, torch.bfloat16)
        out = pipe("This is a test")
        self.assertEqual(
            out,
            [
                {
                    "generated_text": (
                        "This is a test test test test test test test test test test test test test test test test"
                        " test"
                    )
                }
            ],
        )

        # Upgraded those two to real pipeline arguments (they just get sent for the model as they're unlikely to mean anything else.)
        pipe = pipeline(model="hf-internal-testing/tiny-random-bloom", device_map="auto", torch_dtype=torch.bfloat16)
        self.assertEqual(pipe.model.device, torch.device(0))
        self.assertEqual(pipe.model.lm_head.weight.dtype, torch.bfloat16)
        out = pipe("This is a test")
        self.assertEqual(
            out,
            [
                {
                    "generated_text": (
                        "This is a test test test test test test test test test test test test test test test test"
                        " test"
                    )
                }
            ],
        )

        # torch_dtype will be automatically set to float32 if not provided - check: https://github.com/huggingface/transformers/pull/20602
        pipe = pipeline(model="hf-internal-testing/tiny-random-bloom", device_map="auto")
        self.assertEqual(pipe.model.device, torch.device(0))
        self.assertEqual(pipe.model.lm_head.weight.dtype, torch.float32)
        out = pipe("This is a test")
        self.assertEqual(
            out,
            [
                {
                    "generated_text": (
                        "This is a test test test test test test test test test test test test test test test test"
                        " test"
                    )
                }
            ],
        )
    @require_torch
    @require_torch_gpu
    def test_small_model_fp16(self):
        import torch

        pipe = pipeline(model="hf-internal-testing/tiny-random-bloom", device=0, torch_dtype=torch.float16)
        pipe("This is a test")

    @require_torch
    @require_accelerate
    @require_torch_gpu
    def test_pipeline_accelerate_top_p(self):
        import torch

        pipe = pipeline(model="hf-internal-testing/tiny-random-bloom", device_map="auto", torch_dtype=torch.float16)
        pipe("This is a test", do_sample=True, top_p=0.5)
    def test_pipeline_length_setting_warning(self):
        prompt = """Hello world"""
        text_generator = pipeline("text-generation", model="hf-internal-testing/tiny-random-gpt2")
        if text_generator.model.framework == "tf":
            logger = logging.get_logger("transformers.generation.tf_utils")
        else:
            logger = logging.get_logger("transformers.generation.utils")
        logger_msg = "Both `max_new_tokens`"  # The beginning of the message to be checked in this test

        # Both are set by the user -> log warning
        with CaptureLogger(logger) as cl:
            _ = text_generator(prompt, max_length=10, max_new_tokens=1)
        self.assertIn(logger_msg, cl.out)

        # The user only sets one -> no warning
        with CaptureLogger(logger) as cl:
            _ = text_generator(prompt, max_new_tokens=1)
        self.assertNotIn(logger_msg, cl.out)

        with CaptureLogger(logger) as cl:
            _ = text_generator(prompt, max_length=10)
        self.assertNotIn(logger_msg, cl.out)
| 548
| 0
|
"""
Even Tree problem: given a tree rooted at node 1, find the maximum number of
edges that can be removed so that every resulting connected component contains
an even number of nodes. An edge can be cut exactly when the subtree hanging
below it has an even number of nodes.
"""
from collections import defaultdict
def dfs(start: int) -> int:
    """DFS traversal: returns the size of the subtree rooted at `start` and
    records every node whose subtree size is even (a cuttable edge above it)."""
    ret = 1
    visited[start] = True
    for v in tree[start]:
        if v not in visited:
            ret += dfs(v)
    if ret % 2 == 0:
        cuts.append(start)
    return ret


def even_tree():
    """
    On removing edges (1, 3) and (1, 6), we can get the desired result 2.
    """
    dfs(1)


if __name__ == "__main__":
    n, m = 10, 9
    tree = defaultdict(list)
    visited: dict[int, bool] = {}
    cuts: list[int] = []
    count = 0
    edges = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)]
    for u, v in edges:
        tree[u].append(v)
        tree[v].append(u)
    even_tree()
    # The root itself always ends up in `cuts` (the whole tree has an even node
    # count), so the number of removable edges is len(cuts) - 1; here it prints 2.
    print(len(cuts) - 1)
| 527
|
"""Convert T5 checkpoint."""
import argparse
from transformers import T5Config, T5ForConditionalGeneration, load_tf_weights_in_t5
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = T5Config.from_json_file(config_file)
    print(f"""Building PyTorch model from configuration: {config}""")
    model = T5ForConditionalGeneration(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_t5(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"""Save PyTorch model to {pytorch_dump_path}""")
    model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
lowerCAmelCase_ : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
    '--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
lowerCAmelCase_ : Tuple = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
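A hypothetical programmatic invocation of the converter; the three paths below are placeholders and must point at a real TF T5 checkpoint, its config, and a writable output directory:

# Sketch only: paths are illustrative, not from the source.
convert_tf_checkpoint_to_pytorch(
    tf_checkpoint_path="/tmp/t5/model.ckpt",
    config_file="/tmp/t5/config.json",
    pytorch_dump_path="/tmp/t5-pytorch",
)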
| 527
| 1
|
import os
from argparse import ArgumentParser, Namespace
from ..data import SingleSentenceClassificationProcessor as Processor
from ..pipelines import TextClassificationPipeline
from ..utils import is_tf_available, is_torch_available, logging
from . import BaseTransformersCLICommand
if not is_tf_available() and not is_torch_available():
raise RuntimeError("At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training")
# TF training parameters
USE_XLA = False
USE_AMP = False


def train_command_factory(args: Namespace):
    # Factory used by argparse's `func` default to build the command from parsed args.
    return TrainCommand(args)


class TrainCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        train_parser = parser.add_parser("train", help="CLI tool to train a model on a task.")
        train_parser.add_argument(
            "--train_data",
            type=str,
            required=True,
            help="path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences.",
        )
        train_parser.add_argument(
            "--column_label", type=int, default=0, help="Column of the dataset csv file with example labels."
        )
        train_parser.add_argument(
            "--column_text", type=int, default=1, help="Column of the dataset csv file with example texts."
        )
        train_parser.add_argument(
            "--column_id", type=int, default=2, help="Column of the dataset csv file with example ids."
        )
        train_parser.add_argument(
            "--skip_first_row", action="store_true", help="Skip the first row of the csv file (headers)."
        )
        train_parser.add_argument("--validation_data", type=str, default="", help="path to validation dataset.")
        train_parser.add_argument(
            "--validation_split",
            type=float,
            default=0.1,
            help="if validation dataset is not provided, fraction of train dataset to use as validation dataset.",
        )
        train_parser.add_argument("--output", type=str, default="./", help="path to saved the trained model.")
        train_parser.add_argument(
            "--task", type=str, default="text_classification", help="Task to train the model on."
        )
        train_parser.add_argument(
            "--model", type=str, default="bert-base-uncased", help="Model's name or path to stored model."
        )
        train_parser.add_argument("--train_batch_size", type=int, default=32, help="Batch size for training.")
        train_parser.add_argument("--valid_batch_size", type=int, default=64, help="Batch size for validation.")
        train_parser.add_argument("--learning_rate", type=float, default=3e-5, help="Learning rate.")
        train_parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon for Adam optimizer.")
        train_parser.set_defaults(func=train_command_factory)
    def __init__(self, args: Namespace):
        self.logger = logging.get_logger("transformers-cli/training")

        self.framework = "tf" if is_tf_available() else "torch"

        os.makedirs(args.output, exist_ok=True)
        self.output = args.output

        self.column_label = args.column_label
        self.column_text = args.column_text
        self.column_id = args.column_id

        self.logger.info(f"Loading {args.task} pipeline for {args.model}")
        if args.task == "text_classification":
            self.pipeline = TextClassificationPipeline.from_pretrained(args.model)
        elif args.task == "token_classification":
            raise NotImplementedError
        elif args.task == "question_answering":
            raise NotImplementedError

        self.logger.info(f"Loading dataset from {args.train_data}")
        self.train_dataset = Processor.create_from_csv(
            args.train_data,
            column_label=args.column_label,
            column_text=args.column_text,
            column_id=args.column_id,
            skip_first_row=args.skip_first_row,
        )
        self.valid_dataset = None
        if args.validation_data:
            self.logger.info(f"Loading validation dataset from {args.validation_data}")
            self.valid_dataset = Processor.create_from_csv(
                args.validation_data,
                column_label=args.column_label,
                column_text=args.column_text,
                column_id=args.column_id,
                skip_first_row=args.skip_first_row,
            )

        self.validation_split = args.validation_split
        self.train_batch_size = args.train_batch_size
        self.valid_batch_size = args.valid_batch_size
        self.learning_rate = args.learning_rate
        self.adam_epsilon = args.adam_epsilon
    def run(self):
        if self.framework == "tf":
            return self.run_tf()
        return self.run_torch()

    def run_torch(self):
        raise NotImplementedError

    def run_tf(self):
        self.pipeline.fit(
            self.train_dataset,
            validation_data=self.valid_dataset,
            validation_split=self.validation_split,
            learning_rate=self.learning_rate,
            adam_epsilon=self.adam_epsilon,
            train_batch_size=self.train_batch_size,
            valid_batch_size=self.valid_batch_size,
        )

        # Save trained pipeline
        self.pipeline.save_pretrained(self.output)
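A minimal sketch of how this command plugs into an argparse-based CLI, mirroring the register_subcommand/func pattern above (argv values are illustrative):

from argparse import ArgumentParser

parser = ArgumentParser("transformers-cli")
subparsers = parser.add_subparsers()
TrainCommand.register_subcommand(subparsers)
args = parser.parse_args(["train", "--train_data", "train.csv"])
command = args.func(args)  # train_command_factory -> TrainCommand instance
command.run()              # dispatches to run_tf() or run_torch() per installed framework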
| 367
|
import inspect
import warnings
from typing import Any, Dict, Optional, Union
from packaging import version
def deprecate(*args, take_from: Optional[Union[Dict, Any]] = None, standard_warn=True, stacklevel=2):
    from .. import __version__

    deprecated_kwargs = take_from
    values = ()
    if not isinstance(args[0], tuple):
        args = (args,)

    for attribute, version_name, message in args:
        if version.parse(version.parse(__version__).base_version) >= version.parse(version_name):
            raise ValueError(
                f"The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'"
                f" version {__version__} is >= {version_name}"
            )

        warning = None
        if isinstance(deprecated_kwargs, dict) and attribute in deprecated_kwargs:
            values += (deprecated_kwargs.pop(attribute),)
            warning = f"The `{attribute}` argument is deprecated and will be removed in version {version_name}."
        elif hasattr(deprecated_kwargs, attribute):
            values += (getattr(deprecated_kwargs, attribute),)
            warning = f"The `{attribute}` attribute is deprecated and will be removed in version {version_name}."
        elif deprecated_kwargs is None:
            warning = f"`{attribute}` is deprecated and will be removed in version {version_name}."

        if warning is not None:
            warning = warning + " " if standard_warn else ""
            warnings.warn(warning + message, FutureWarning, stacklevel=stacklevel)

    if isinstance(deprecated_kwargs, dict) and len(deprecated_kwargs) > 0:
        call_frame = inspect.getouterframes(inspect.currentframe())[1]
        filename = call_frame.filename
        line_number = call_frame.lineno
        function = call_frame.function
        key, value = next(iter(deprecated_kwargs.items()))
        raise TypeError(f"{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`")

    if len(values) == 0:
        return
    elif len(values) == 1:
        return values[0]
    return values
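A usage sketch for deprecate() inside a function that still accepts a retired keyword argument; the names and the far-future removal version are illustrative, not from the source:

import warnings

def resize(new_size=None, **kwargs):
    # Pops the deprecated `size` kwarg (if present), emits a FutureWarning, and
    # returns its value so callers on the old API keep working.
    size = deprecate("size", "99.0.0", "Please use `new_size` instead.", take_from=kwargs)
    if size is not None:
        new_size = size
    return new_size

print(resize(size=512))  # warns once, then prints 512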
| 367
| 1
|
"""simple docstring"""
import argparse
from collections import defaultdict
import yaml
PATH_TO_TOC = "docs/source/en/_toctree.yml"


def clean_doc_toc(doc_list):
    # Remove duplicate entries and sort the list alphabetically, keeping "Overview" first.
    counts = defaultdict(int)
    overview_doc = []
    new_doc_list = []
    for doc in doc_list:
        if "local" in doc:
            counts[doc["local"]] += 1
        if doc["title"].lower() == "overview":
            overview_doc.append({"local": doc["local"], "title": doc["title"]})
        else:
            new_doc_list.append(doc)
    doc_list = new_doc_list

    duplicates = [key for key, value in counts.items() if value > 1]

    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc["title"] for doc in doc_list if doc["local"] == duplicate_key})
        if len(titles) > 1:
            raise ValueError(
                f"{duplicate_key} is present several times in the documentation table of content at "
                "`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
                "others."
            )
        # Only add this once
        new_doc.append({"local": duplicate_key, "title": titles[0]})

    # Add none duplicate-keys
    new_doc.extend([doc for doc in doc_list if "local" not in counts or counts[doc["local"]] == 1])
    new_doc = sorted(new_doc, key=lambda s: s["title"].lower())

    # "overview" gets special treatment and is always first
    if len(overview_doc) > 1:
        raise ValueError(f"{doc_list} has two 'overview' docs which is not allowed.")

    overview_doc.extend(new_doc)

    # Sort
    return overview_doc


def check_scheduler_doc(overwrite=False):
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the scheduler doc
    scheduler_idx = 0
    while api_doc[scheduler_idx]["title"] != "Schedulers":
        scheduler_idx += 1

    scheduler_doc = api_doc[scheduler_idx]["sections"]
    new_scheduler_doc = clean_doc_toc(scheduler_doc)

    diff = False
    if new_scheduler_doc != scheduler_doc:
        diff = True
        if overwrite:
            api_doc[scheduler_idx]["sections"] = new_scheduler_doc

    if diff:
        if overwrite:
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The scheduler doc part of the table of content is not properly sorted, run `make style` to fix this."
            )


def check_pipeline_doc(overwrite=False):
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the pipeline doc
    pipeline_idx = 0
    while api_doc[pipeline_idx]["title"] != "Pipelines":
        pipeline_idx += 1

    diff = False
    pipeline_docs = api_doc[pipeline_idx]["sections"]
    new_pipeline_docs = []

    # sort sub pipeline docs
    for pipeline_doc in pipeline_docs:
        if "section" in pipeline_doc:
            sub_pipeline_doc = pipeline_doc["section"]
            new_sub_pipeline_doc = clean_doc_toc(sub_pipeline_doc)
            if overwrite:
                pipeline_doc["section"] = new_sub_pipeline_doc
        new_pipeline_docs.append(pipeline_doc)

    # sort overall pipeline doc
    new_pipeline_docs = clean_doc_toc(new_pipeline_docs)

    if new_pipeline_docs != pipeline_docs:
        diff = True
        if overwrite:
            api_doc[pipeline_idx]["sections"] = new_pipeline_docs

    if diff:
        if overwrite:
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The pipeline doc part of the table of content is not properly sorted, run `make style` to fix this."
            )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_scheduler_doc(args.fix_and_overwrite)
    check_pipeline_doc(args.fix_and_overwrite)
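A toy illustration of clean_doc_toc (the input list is invented for the example): duplicates are collapsed, entries are sorted by title, and "Overview" is pinned first.

toc = [
    {"local": "b", "title": "Beta"},
    {"local": "a", "title": "Alpha"},
    {"local": "b", "title": "Beta"},
    {"local": "o", "title": "Overview"},
]
print(clean_doc_toc(toc))
# [{'local': 'o', 'title': 'Overview'}, {'local': 'a', 'title': 'Alpha'}, {'local': 'b', 'title': 'Beta'}]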
| 76
|
"""simple docstring"""
def check_cycle(graph: dict) -> bool:
    # Keep track of all visited nodes
    visited: set[int] = set()
    # To detect a back edge, keep track of vertices currently in the recursion stack
    rec_stk: set[int] = set()
    return any(
        node not in visited and depth_first_search(graph, node, visited, rec_stk)
        for node in graph
    )


def depth_first_search(graph: dict, vertex: int, visited: set, rec_stk: set) -> bool:
    # Mark the current node as visited and add it to the recursion stack
    visited.add(vertex)
    rec_stk.add(vertex)

    for node in graph[vertex]:
        if node not in visited:
            if depth_first_search(graph, node, visited, rec_stk):
                return True
        elif node in rec_stk:
            # A neighbor still on the recursion stack means a back edge, i.e. a cycle
            return True

    # The node needs to be removed from recursion stack before function ends
    rec_stk.remove(vertex)
    return False
if __name__ == "__main__":
from doctest import testmod
testmod()
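Example runs on two small directed graphs (invented for the example): 1 -> 2 -> 3 -> 1 forms a cycle, while the second graph is acyclic.

cyclic = {1: [2], 2: [3], 3: [1]}
acyclic = {1: [2], 2: [3], 3: []}
print(check_cycle(cyclic))   # True
print(check_cycle(acyclic))  # False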
| 76
| 1
|
from __future__ import annotations
import copy
import tempfile
import unittest
from transformers import CONFIG_MAPPING, AutoConfig, BertConfig, GPT2Config, T5Config, TapasConfig, is_tf_available
from transformers.testing_utils import (
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tensorflow_probability,
require_tf,
slow,
)
from ..bert.test_modeling_bert import BertModelTester
if is_tf_available():
from transformers import (
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
        TFAutoModelForSeq2SeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelForTableQuestionAnswering,
TFAutoModelForTokenClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFFunnelBaseModel,
TFFunnelModel,
        TFGPT2LMHeadModel,
TFRobertaForMaskedLM,
        TFT5ForConditionalGeneration,
TFTapasForQuestionAnswering,
)
from transformers.models.auto.modeling_tf_auto import (
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_MAPPING,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
    from transformers.models.gpt2.modeling_tf_gpt2 import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
    from transformers.models.t5.modeling_tf_t5 import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.tapas.modeling_tf_tapas import TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST
class NewModelConfig(BertConfig):
    model_type = "new-model"


if is_tf_available():

    class TFNewModel(TFBertModel):
        config_class = NewModelConfig
@require_tf
class TFAutoModelTest(unittest.TestCase):
    @slow
    def test_model_from_pretrained(self):
        model_name = "bert-base-cased"
        config = AutoConfig.from_pretrained(model_name)
        self.assertIsNotNone(config)
        self.assertIsInstance(config, BertConfig)

        model = TFAutoModel.from_pretrained(model_name)
        self.assertIsNotNone(model)
        self.assertIsInstance(model, TFBertModel)

    @slow
    def test_model_for_pretraining_from_pretrained(self):
        model_name = "bert-base-cased"
        config = AutoConfig.from_pretrained(model_name)
        self.assertIsNotNone(config)
        self.assertIsInstance(config, BertConfig)

        model = TFAutoModelForPreTraining.from_pretrained(model_name)
        self.assertIsNotNone(model)
        self.assertIsInstance(model, TFBertForPreTraining)

    @slow
    def test_model_for_causal_lm(self):
        for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, GPT2Config)

            model = TFAutoModelForCausalLM.from_pretrained(model_name)
            model, loading_info = TFAutoModelForCausalLM.from_pretrained(model_name, output_loading_info=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFGPT2LMHeadModel)

    @slow
    def test_lmhead_model_from_pretrained(self):
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelWithLMHead.from_pretrained(model_name)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForMaskedLM)

    @slow
    def test_model_for_masked_lm(self):
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForMaskedLM.from_pretrained(model_name)
            model, loading_info = TFAutoModelForMaskedLM.from_pretrained(model_name, output_loading_info=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForMaskedLM)

    @slow
    def test_model_for_encoder_decoder_lm(self):
        for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, T5Config)

            model = TFAutoModelForSeq2SeqLM.from_pretrained(model_name)
            model, loading_info = TFAutoModelForSeq2SeqLM.from_pretrained(model_name, output_loading_info=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFT5ForConditionalGeneration)

    @slow
    def test_sequence_classification_model_from_pretrained(self):
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForSequenceClassification.from_pretrained(model_name)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForSequenceClassification)

    @slow
    def test_question_answering_model_from_pretrained(self):
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForQuestionAnswering.from_pretrained(model_name)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForQuestionAnswering)

    @slow
    @require_tensorflow_probability
    def test_table_question_answering(self):
        for model_name in TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST[5:6]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, TapasConfig)

            model = TFAutoModelForTableQuestionAnswering.from_pretrained(model_name)
            model, loading_info = TFAutoModelForTableQuestionAnswering.from_pretrained(
                model_name, output_loading_info=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFTapasForQuestionAnswering)
    def test_from_pretrained_identifier(self):
        model = TFAutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER)
        self.assertIsInstance(model, TFBertForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)

    def test_from_identifier_from_model_type(self):
        model = TFAutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER)
        self.assertIsInstance(model, TFRobertaForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)
    def test_from_config_respects_architectures(self):
        model = TFAutoModel.from_pretrained("sgugger/funnel-random-tiny")
        self.assertIsInstance(model, TFFunnelModel)

        config = copy.deepcopy(model.config)
        config.architectures = ["FunnelBaseModel"]
        model = TFAutoModel.from_config(config)
        self.assertIsInstance(model, TFFunnelBaseModel)

        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(tmp_dir)
            model = TFAutoModel.from_pretrained(tmp_dir)
            self.assertIsInstance(model, TFFunnelBaseModel)
    def test_new_model_registration(self):
        try:
            AutoConfig.register("new-model", NewModelConfig)

            auto_classes = [
                TFAutoModel,
                TFAutoModelForCausalLM,
                TFAutoModelForMaskedLM,
                TFAutoModelForPreTraining,
                TFAutoModelForQuestionAnswering,
                TFAutoModelForSequenceClassification,
                TFAutoModelForTokenClassification,
            ]

            for auto_class in auto_classes:
                with self.subTest(auto_class.__name__):
                    # Wrong config class will raise an error
                    with self.assertRaises(ValueError):
                        auto_class.register(BertConfig, TFNewModel)
                    auto_class.register(NewModelConfig, TFNewModel)
                    # Trying to register something existing in the Transformers library will raise an error
                    with self.assertRaises(ValueError):
                        auto_class.register(BertConfig, TFBertModel)

                    # Now that the config is registered, it can be used as any other config with the auto-API
                    tiny_config = BertModelTester(self).get_config()
                    config = NewModelConfig(**tiny_config.to_dict())
                    model = auto_class.from_config(config)
                    self.assertIsInstance(model, TFNewModel)

                    with tempfile.TemporaryDirectory() as tmp_dir:
                        model.save_pretrained(tmp_dir)
                        new_model = auto_class.from_pretrained(tmp_dir)
                        self.assertIsInstance(new_model, TFNewModel)
        finally:
            if "new-model" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["new-model"]
            for mapping in (
                TF_MODEL_MAPPING,
                TF_MODEL_FOR_PRETRAINING_MAPPING,
                TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
                TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
                TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
                TF_MODEL_FOR_CAUSAL_LM_MAPPING,
                TF_MODEL_FOR_MASKED_LM_MAPPING,
            ):
                if NewModelConfig in mapping._extra_content:
                    del mapping._extra_content[NewModelConfig]
    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"
        ):
            model = TFAutoModel.from_pretrained("bert-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            model = TFAutoModel.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_model_file_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/config-no-model does not appear to have a file named pytorch_model.bin",
        ):
            model = TFAutoModel.from_pretrained("hf-internal-testing/config-no-model")

    def test_model_from_pt_suggestion(self):
        with self.assertRaisesRegex(EnvironmentError, "Use `from_pt=True` to load this model"):
            model = TFAutoModel.from_pretrained("hf-internal-testing/tiny-bert-pt-only")
    def test_cached_model_has_minimum_calls_to_head(self):
        # Make sure we have cached the model.
        model = TFAutoModel.from_pretrained("hf-internal-testing/tiny-random-bert")
        with RequestCounter() as counter:
            model = TFAutoModel.from_pretrained("hf-internal-testing/tiny-random-bert")
        self.assertEqual(counter.get_request_count, 0)
        self.assertEqual(counter.head_request_count, 1)
        self.assertEqual(counter.other_request_count, 0)

        # With a sharded checkpoint
        model = TFAutoModel.from_pretrained("ArthurZ/tiny-random-bert-sharded")
        with RequestCounter() as counter:
            model = TFAutoModel.from_pretrained("ArthurZ/tiny-random-bert-sharded")
        self.assertEqual(counter.get_request_count, 0)
        self.assertEqual(counter.head_request_count, 1)
        self.assertEqual(counter.other_request_count, 0)
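A sketch of the custom-registration flow the test above exercises: a new config type plus a TF model class registered with the auto API. Names follow the test classes defined earlier; the config overrides are illustrative.

from transformers import AutoConfig, TFAutoModel

AutoConfig.register("new-model", NewModelConfig)
TFAutoModel.register(NewModelConfig, TFNewModel)

# Once registered, "new-model" configs route through the auto API like any built-in type.
config = NewModelConfig(hidden_size=32, num_hidden_layers=2, num_attention_heads=2, intermediate_size=37)
model = TFAutoModel.from_config(config)  # -> TFNewModel instance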
| 142
|
import importlib.metadata
import operator
import re
import sys
from typing import Optional
from packaging import version
ops = {
'<': operator.lt,
'<=': operator.le,
'==': operator.eq,
'!=': operator.ne,
'>=': operator.ge,
'>': operator.gt,
}
def _compare_versions(op, got_ver, want_ver, requirement, pkg, hint):
    if got_ver is None or want_ver is None:
        raise ValueError(
            f"Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider"
            f" reinstalling {pkg}."
        )
    if not ops[op](version.parse(got_ver), version.parse(want_ver)):
        raise ImportError(
            f"{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}"
        )


def require_version(requirement: str, hint: Optional[str] = None) -> None:
    hint = f"\n{hint}" if hint is not None else ""

    # non-versioned check
    if re.match(r"^[\w_\-\d]+$", requirement):
        pkg, op, want_ver = requirement, None, None
    else:
        match = re.findall(r"^([^!=<>\s]+)([\s!=<>]{1,2}.+)", requirement)
        if not match:
            raise ValueError(
                "requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23, but"
                f" got {requirement}"
            )
        pkg, want_full = match[0]
        want_range = want_full.split(",")  # there could be multiple requirements
        wanted = {}
        for w in want_range:
            match = re.findall(r"^([\s!=<>]{1,2})(.+)", w)
            if not match:
                raise ValueError(
                    "requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23,"
                    f" but got {requirement}"
                )
            op, want_ver = match[0]
            wanted[op] = want_ver
            if op not in ops:
                raise ValueError(f"{requirement}: need one of {list(ops.keys())}, but got {op}")

    # special case
    if pkg == "python":
        got_ver = ".".join([str(x) for x in sys.version_info[:3]])
        for op, want_ver in wanted.items():
            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint)
        return

    # check if any version is installed
    try:
        got_ver = importlib.metadata.version(pkg)
    except importlib.metadata.PackageNotFoundError:
        raise importlib.metadata.PackageNotFoundError(
            f"The '{requirement}' distribution was not found and is required by this application. {hint}"
        )

    # check that the right version is installed if version number or a range was provided
    if want_ver is not None:
        for op, want_ver in wanted.items():
            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint)


def require_version_core(requirement):
    # require_version wrapper which emits a core-specific hint on failure.
    hint = "Try: pip install transformers -U or pip install -e '.[dev]' if you're working with git main"
    return require_version(requirement, hint)
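Example checks (the pins are illustrative; any pip-style specifier works):

require_version("numpy")                     # just needs to be installed, any version
require_version("tokenizers>=0.11.1,<0.14")  # comma-separated range, each part checked
require_version_core("protobuf")             # same check, with the transformers dev hint on failure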
| 142
| 1
|
'''simple docstring'''
import os
from datetime import datetime as dt
from github import Github
LABELS_TO_EXEMPT = [
"""good first issue""",
"""good second issue""",
"""good difficult issue""",
"""enhancement""",
"""new pipeline/model""",
"""new scheduler""",
"""wip""",
]
def main():
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/diffusers")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        comments = sorted(issue.get_comments(), key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Closes the issue after 7 days of inactivity since the Stalebot notification.
issue.edit(state='''closed''' )
elif (
"stale" in issue.get_labels()
and last_comment is not None
and last_comment.user.login != "github-actions[bot]"
):
# Opens the issue if someone other than Stalebot commented.
issue.edit(state='''open''' )
issue.remove_from_labels('''stale''' )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Post a Stalebot notification after 23 days of inactivity.
issue.create_comment(
'''This issue has been automatically marked as stale because it has not had '''
'''recent activity. If you think this still needs to be addressed '''
'''please comment on this thread.\n\nPlease note that issues that do not follow the '''
'''[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) '''
'''are likely to be ignored.''' )
issue.add_to_labels('''stale''' )
if __name__ == "__main__":
main()
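For clarity, the closing rule above factored out as a standalone predicate; this is a sketch, not part of the original script:

from datetime import datetime as dt

def is_stale(issue, last_comment, exempt_labels) -> bool:
    # An issue is closed when the stale notification is the latest comment,
    # it has been quiet for over a week, is at least 30 days old, and carries
    # no exempt label.
    return (
        last_comment is not None
        and last_comment.user.login == "github-actions[bot]"
        and (dt.utcnow() - issue.updated_at).days > 7
        and (dt.utcnow() - issue.created_at).days >= 30
        and not any(label.name.lower() in exempt_labels for label in issue.get_labels())
    )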
| 349
|
'''simple docstring'''
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import BatchEncoding, MarianTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available
if is_sentencepiece_available():
from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_SP = get_tests_dir("fixtures/test_sentencepiece.model")

mock_tokenizer_config = {"target_lang": "fi", "source_lang": "en"}
zh_code = ">>zh<<"
ORG_NAME = "Helsinki-NLP/"

if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"
@require_sentencepiece
class MarianTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MarianTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    def setUp(self):
        super().setUp()
        vocab = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        save_dir = Path(self.tmpdirname)
        save_json(vocab_tokens, save_dir / VOCAB_FILES_NAMES["vocab"])
        save_json(mock_tokenizer_config, save_dir / VOCAB_FILES_NAMES["tokenizer_config_file"])
        if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists():
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES["source_spm"])
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES["target_spm"])

        tokenizer = MarianTokenizer.from_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname)
    def get_tokenizer(self, **kwargs) -> MarianTokenizer:
        return MarianTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return (
            "This is a test",
            "This is a test",
        )

    def test_convert_token_and_id(self):
        token = "</s>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "</s>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "<pad>")
        self.assertEqual(len(vocab_keys), 9)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 9)

    def test_tokenizer_equivalence_en_de(self):
        en_de_tokenizer = MarianTokenizer.from_pretrained(f"{ORG_NAME}opus-mt-en-de")
        batch = en_de_tokenizer(["I am a small frog"], return_tensors=None)
        self.assertIsInstance(batch, BatchEncoding)
        expected = [38, 121, 14, 697, 38848, 0]
        self.assertListEqual(expected, batch.input_ids[0])

        save_dir = tempfile.mkdtemp()
        en_de_tokenizer.save_pretrained(save_dir)
        contents = [x.name for x in Path(save_dir).glob("*")]
        self.assertIn("source.spm", contents)
        MarianTokenizer.from_pretrained(save_dir)

    def test_outputs_not_longer_than_maxlen(self):
        tok = self.get_tokenizer()

        batch = tok(
            ["I am a small frog" * 1000, "I am a small frog"], padding=True, truncation=True, return_tensors=FRAMEWORK
        )
        self.assertIsInstance(batch, BatchEncoding)
        self.assertEqual(batch.input_ids.shape, (2, 512))

    def test_outputs_can_be_shorter(self):
        tok = self.get_tokenizer()
        batch_smaller = tok(["I am a tiny frog", "I am a small frog"], padding=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch_smaller, BatchEncoding)
        self.assertEqual(batch_smaller.input_ids.shape, (2, 10))
@slow
def __snake_case ( self : Optional[int] ):
'''simple docstring'''
snake_case : List[Any] ={'''input_ids''': [[43_495, 462, 20, 42_164, 1_369, 52, 464, 132, 1_703, 492, 13, 7_491, 38_999, 6, 8, 464, 132, 1_703, 492, 13, 4_669, 37_867, 13, 7_525, 27, 1_593, 988, 13, 33_972, 7_029, 6, 20, 8_251, 383, 2, 270, 5_866, 3_788, 2, 2_353, 8_251, 12_338, 2, 13_958, 387, 2, 3_629, 6_953, 188, 2_900, 2, 13_958, 8_011, 11_501, 23, 8_460, 4_073, 34_009, 20, 435, 11_439, 27, 8, 8_460, 4_073, 6_004, 20, 9_988, 375, 27, 33, 266, 1_945, 1_076, 1_350, 37_867, 3_288, 5, 577, 1_076, 4_374, 8, 5_082, 5, 26_453, 257, 556, 403, 2, 242, 132, 383, 316, 492, 8, 10_767, 6, 316, 304, 4_239, 3, 0], [148, 15_722, 19, 1_839, 12, 1_350, 13, 22_327, 5_082, 5_418, 47_567, 35_938, 59, 318, 19_552, 108, 2_183, 54, 14_976, 4_835, 32, 547, 1_114, 8, 315, 2_417, 5, 92, 19_088, 3, 0, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100], [36, 6_395, 12_570, 39_147, 11_597, 6, 266, 4, 45_405, 7_296, 3, 0, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_snake_case, model_name='''Helsinki-NLP/opus-mt-en-de''', revision='''1a8c2263da11e68e50938f97e10cd57820bd504c''', decode_kwargs={'''use_source_tokenizer''': True}, )
    def test_tokenizer_integration_seperate_vocabs(self):
        tokenizer = MarianTokenizer.from_pretrained("hf-internal-testing/test-marian-two-vocabs")

        source_text = "Tämä on testi"
        target_text = "This is a test"

        expected_src_ids = [76, 7, 2047, 2]
        expected_target_ids = [69, 12, 11, 940, 2]

        src_ids = tokenizer(source_text).input_ids
        self.assertListEqual(src_ids, expected_src_ids)

        target_ids = tokenizer(text_target=target_text).input_ids
        self.assertListEqual(target_ids, expected_target_ids)

        decoded = tokenizer.decode(target_ids, skip_special_tokens=True)
        self.assertEqual(decoded, target_text)
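A usage sketch of the two-vocab behavior the last test exercises: source text runs through the source sentencepiece model and text_target through the target one (same test checkpoint as above):

tokenizer = MarianTokenizer.from_pretrained("hf-internal-testing/test-marian-two-vocabs")
enc = tokenizer("Tämä on testi", text_target="This is a test")
print(enc.input_ids)  # source-vocab ids
print(enc.labels)     # target-vocab ids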
| 349
| 1
|
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class EfficientFormerImageProcessorTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        image_size=224,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
        }
@require_torch
@require_vision
class EfficientFormerImageProcessorTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ViTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_proc_tester = EfficientFormerImageProcessorTester(self)

    @property
    def image_processor_dict(self):
        return self.image_proc_tester.prepare_image_processor_dict()

    def test_image_proc_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "image_mean"))
        self.assertTrue(hasattr(image_processor, "image_std"))
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_resize"))
        self.assertTrue(hasattr(image_processor, "size"))

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )
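A minimal usage sketch of the processor under test, assuming recent transformers where ViTImageProcessor accepts a {"height", "width"} size dict: it resizes and normalizes one PIL image into a (1, 3, 18, 18) tensor matching the dict above.

from PIL import Image
import numpy as np
from transformers import ViTImageProcessor

processor = ViTImageProcessor(do_resize=True, size={"height": 18, "width": 18}, do_normalize=True)
image = Image.fromarray(np.zeros((32, 32, 3), dtype=np.uint8))
pixel_values = processor(image, return_tensors="pt").pixel_values
print(pixel_values.shape)  # torch.Size([1, 3, 18, 18])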
| 284
|
import tempfile
import numpy as np
import torch
from transformers import AutoTokenizer, T5EncoderModel

from diffusers import DDPMScheduler, UNet2DConditionModel
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.pipelines.deepfloyd_if import IFWatermarker
from diffusers.utils.testing_utils import torch_device
from ..test_pipelines_common import to_np
class IFPipelineTesterMixin:
    def get_dummy_components(self):
        torch.manual_seed(0)
        text_encoder = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5")

        torch.manual_seed(0)
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5")

        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32,
            layers_per_block=1,
            block_out_channels=[32, 64],
            down_block_types=["ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"],
            mid_block_type="UNetMidBlock2DSimpleCrossAttn",
            up_block_types=["SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"],
            in_channels=3,
            out_channels=6,
            cross_attention_dim=32,
            encoder_hid_dim=32,
            attention_head_dim=8,
            addition_embed_type="text",
            addition_embed_type_num_heads=2,
            cross_attention_norm="group_norm",
            resnet_time_scale_shift="scale_shift",
            act_fn="gelu",
        )
        unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests

        torch.manual_seed(0)
        scheduler = DDPMScheduler(
            num_train_timesteps=1000,
            beta_schedule="squaredcos_cap_v2",
            beta_start=0.0001,
            beta_end=0.02,
            thresholding=True,
            dynamic_thresholding_ratio=0.95,
            sample_max_value=1.0,
            prediction_type="epsilon",
            variance_type="learned_range",
        )

        torch.manual_seed(0)
        watermarker = IFWatermarker()

        return {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "watermarker": watermarker,
            "safety_checker": None,
            "feature_extractor": None,
        }
    def get_superresolution_dummy_components(self):
        torch.manual_seed(0)
        text_encoder = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5")

        torch.manual_seed(0)
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5")

        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32,
            layers_per_block=[1, 2],
            block_out_channels=[32, 64],
            down_block_types=["ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"],
            mid_block_type="UNetMidBlock2DSimpleCrossAttn",
            up_block_types=["SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"],
            in_channels=6,
            out_channels=6,
            cross_attention_dim=32,
            encoder_hid_dim=32,
            attention_head_dim=8,
            addition_embed_type="text",
            addition_embed_type_num_heads=2,
            cross_attention_norm="group_norm",
            resnet_time_scale_shift="scale_shift",
            act_fn="gelu",
            class_embed_type="timestep",
            mid_block_scale_factor=1.414,
            time_embedding_act_fn="gelu",
            time_embedding_dim=32,
        )
        unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests

        torch.manual_seed(0)
        scheduler = DDPMScheduler(
            num_train_timesteps=1000,
            beta_schedule="squaredcos_cap_v2",
            beta_start=0.0001,
            beta_end=0.02,
            thresholding=True,
            dynamic_thresholding_ratio=0.95,
            sample_max_value=1.0,
            prediction_type="epsilon",
            variance_type="learned_range",
        )

        torch.manual_seed(0)
        image_noising_scheduler = DDPMScheduler(
            num_train_timesteps=1000,
            beta_schedule="squaredcos_cap_v2",
            beta_start=0.0001,
            beta_end=0.02,
        )

        torch.manual_seed(0)
        watermarker = IFWatermarker()

        return {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "image_noising_scheduler": image_noising_scheduler,
            "watermarker": watermarker,
            "safety_checker": None,
            "feature_extractor": None,
        }
    def test_save_load_optional_components(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)

        prompt = inputs["prompt"]
        generator = inputs["generator"]
        num_inference_steps = inputs["num_inference_steps"]
        output_type = inputs["output_type"]

        if "image" in inputs:
            image = inputs["image"]
        else:
            image = None

        if "mask_image" in inputs:
            mask_image = inputs["mask_image"]
        else:
            mask_image = None

        if "original_image" in inputs:
            original_image = inputs["original_image"]
        else:
            original_image = None

        prompt_embeds, negative_prompt_embeds = pipe.encode_prompt(prompt)

        # inputs with prompt converted to embeddings
        inputs = {
            "prompt_embeds": prompt_embeds,
            "negative_prompt_embeds": negative_prompt_embeds,
            "generator": generator,
            "num_inference_steps": num_inference_steps,
            "output_type": output_type,
        }

        if image is not None:
            inputs["image"] = image
        if mask_image is not None:
            inputs["mask_image"] = mask_image
        if original_image is not None:
            inputs["original_image"] = original_image

        # set all optional components to None
        for optional_component in pipe._optional_components:
            setattr(pipe, optional_component, None)

        output = pipe(**inputs)[0]

        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir)
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)
        pipe_loaded.to(torch_device)
        pipe_loaded.set_progress_bar_config(disable=None)
        pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests

        for optional_component in pipe._optional_components:
            self.assertTrue(
                getattr(pipe_loaded, optional_component) is None,
                f"`{optional_component}` did not stay set to None after loading.",
            )

        inputs = self.get_dummy_inputs(torch_device)

        generator = inputs["generator"]
        num_inference_steps = inputs["num_inference_steps"]
        output_type = inputs["output_type"]

        # inputs with prompt converted to embeddings
        inputs = {
            "prompt_embeds": prompt_embeds,
            "negative_prompt_embeds": negative_prompt_embeds,
            "generator": generator,
            "num_inference_steps": num_inference_steps,
            "output_type": output_type,
        }

        if image is not None:
            inputs["image"] = image
        if mask_image is not None:
            inputs["mask_image"] = mask_image
        if original_image is not None:
            inputs["original_image"] = original_image

        output_loaded = pipe_loaded(**inputs)[0]

        max_diff = np.abs(to_np(output) - to_np(output_loaded)).max()
        self.assertLess(max_diff, 1e-4)
    def test_save_load_local(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        output = pipe(**inputs)[0]

        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir)
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)
        pipe_loaded.to(torch_device)
        pipe_loaded.set_progress_bar_config(disable=None)
        pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests

        inputs = self.get_dummy_inputs(torch_device)
        output_loaded = pipe_loaded(**inputs)[0]

        max_diff = np.abs(to_np(output) - to_np(output_loaded)).max()
        self.assertLess(max_diff, 1e-4)
| 284
| 1
|
from __future__ import annotations
from typing import Dict
from ...configuration_utils import PretrainedConfig
ERNIE_M_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "susnato/ernie-m-base_pytorch": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json",
    "susnato/ernie-m-large_pytorch": "https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json",
}
class ErnieMConfig(PretrainedConfig):
    model_type = "ernie_m"
    attribute_map: Dict[str, str] = {"dropout": "classifier_dropout", "num_classes": "num_labels"}

    def __init__(
        self,
        vocab_size: int = 250002,
        hidden_size: int = 768,
        num_hidden_layers: int = 12,
        num_attention_heads: int = 12,
        intermediate_size: int = 3072,
        hidden_act: str = "gelu",
        hidden_dropout_prob: float = 0.1,
        attention_probs_dropout_prob: float = 0.1,
        max_position_embeddings: int = 514,
        initializer_range: float = 0.02,
        pad_token_id: int = 1,
        layer_norm_eps: float = 1e-05,
        classifier_dropout=None,
        is_decoder=False,
        act_dropout=0.0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.is_decoder = is_decoder
        self.act_dropout = act_dropout
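An instantiation sketch: defaults mirror the signature above, and the overrides are illustrative.

config = ErnieMConfig(hidden_size=256, num_hidden_layers=4)
print(config.hidden_size, config.num_attention_heads)  # 256 12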
| 74
|
from collections import namedtuple
from_to = namedtuple("from_to", "from_ to")

METRIC_CONVERSION = {
    "cubicmeter": from_to(1, 1),
    "litre": from_to(0.001, 1000),
    "kilolitre": from_to(1, 1),
    "gallon": from_to(0.00454, 264.172),
    "cubicyard": from_to(0.76455, 1.30795),
    "cubicfoot": from_to(0.028, 35.3147),
    "cup": from_to(0.000236588, 4226.75),
}


def volume_conversion(value: float, from_type: str, to_type: str) -> float:
    if from_type not in METRIC_CONVERSION:
        raise ValueError(
            f"Invalid 'from_type' value: {from_type!r} Supported values are:\n"
            + ", ".join(METRIC_CONVERSION)
        )
    if to_type not in METRIC_CONVERSION:
        raise ValueError(
            f"Invalid 'to_type' value: {to_type!r}. Supported values are:\n"
            + ", ".join(METRIC_CONVERSION)
        )
    return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to
if __name__ == "__main__":
import doctest
doctest.testmod()
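Example conversions; the results follow directly from the factor table above:

print(volume_conversion(4, "cubicmeter", "litre"))  # 4000.0
print(volume_conversion(1, "litre", "gallon"))      # 0.264172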
| 648
| 0
|
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class FlaxRoFormerModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RoFormerConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_flax
class FlaxRoFormerModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
(
FlaxRoFormerModel,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
)
if is_flax_available()
else ()
)
def __a ( self ) -> Optional[int]:
'''simple docstring'''
snake_case__ : List[str] = FlaxRoFormerModelTester(self )
@slow
def __a ( self ) -> Optional[Any]:
'''simple docstring'''
for model_class_name in self.all_model_classes:
snake_case__ : List[str] = model_class_name.from_pretrained('junnyu/roformer_chinese_small' , from_pt=__UpperCamelCase )
snake_case__ : List[str] = model(np.ones((1, 1) ) )
self.assertIsNotNone(__UpperCamelCase )
@require_flax
class __snake_case ( unittest.TestCase ):
@slow
def __a ( self ) -> Optional[Any]:
'''simple docstring'''
snake_case__ : Any = FlaxRoFormerForMaskedLM.from_pretrained('junnyu/roformer_chinese_base' )
snake_case__ : Union[str, Any] = jnp.array([[0, 1, 2, 3, 4, 5]] )
snake_case__ : int = model(__UpperCamelCase )[0]
snake_case__ : Optional[Any] = 50000
snake_case__ : int = (1, 6, vocab_size)
self.assertEqual(output.shape , __UpperCamelCase )
snake_case__ : List[Any] = jnp.array(
[[[-0.1_2_0_5, -1.0_2_6_5, 0.2_9_2_2], [-1.5_1_3_4, 0.1_9_7_4, 0.1_5_1_9], [-5.0_1_3_5, -3.9_0_0_3, -0.8_4_0_4]]] )
self.assertTrue(jnp.allclose(output[:, :3, :3] , __UpperCamelCase , atol=1E-4 ) )
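
# A minimal inference sketch (illustrative, not part of the original test file;
# it mirrors test_inference_masked_lm above and assumes the Hub checkpoint and
# its [MASK] token are reachable):
#
#     from transformers import AutoTokenizer, FlaxRoFormerForMaskedLM
#
#     tokenizer = AutoTokenizer.from_pretrained("junnyu/roformer_chinese_base")
#     model = FlaxRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base")
#     inputs = tokenizer("今天天气非常[MASK]。", return_tensors="np")
#     logits = model(**inputs).logits
#     mask_idx = int((inputs["input_ids"][0] == tokenizer.mask_token_id).argmax())
#     print(tokenizer.decode(int(logits[0, mask_idx].argmax())))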
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_xglm": ["XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XGLMConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xglm"] = ["XGLMTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xglm_fast"] = ["XGLMTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xglm"] = [
        "XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XGLMForCausalLM",
        "XGLMModel",
        "XGLMPreTrainedModel",
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_xglm"] = [
        "FlaxXGLMForCausalLM",
        "FlaxXGLMModel",
        "FlaxXGLMPreTrainedModel",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xglm"] = [
        "TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFXGLMForCausalLM",
        "TFXGLMModel",
        "TFXGLMPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
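
# What the lazy structure buys us (illustrative sketch, not part of the
# original __init__.py): importing the package is cheap, and backend-specific
# classes are only materialized on first attribute access.
#
#     import transformers
#
#     config = transformers.XGLMConfig()      # triggers the real configuration import
#     model = transformers.XGLMModel(config)  # triggers the torch modeling import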
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'facebook/xmod-base': 'https://huggingface.co/facebook/xmod-base/resolve/main/config.json',
'facebook/xmod-large-prenorm': 'https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json',
'facebook/xmod-base-13-125k': 'https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json',
'facebook/xmod-base-30-125k': 'https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json',
'facebook/xmod-base-30-195k': 'https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json',
'facebook/xmod-base-60-125k': 'https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json',
'facebook/xmod-base-60-265k': 'https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json',
'facebook/xmod-base-75-125k': 'https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json',
'facebook/xmod-base-75-269k': 'https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json',
}
class XmodConfig(PretrainedConfig):
    r"""
    Configuration class to store the configuration of an X-MOD model.
    """

    model_type = "xmod"

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute", use_cache=True, classifier_dropout=None, pre_norm=False, adapter_reduction_factor=2, adapter_layer_norm=False, adapter_reuse_layer_norm=True, ln_before_adapter=True, languages=("en_XX",), default_language=None, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        self.pre_norm = pre_norm
        self.adapter_reduction_factor = adapter_reduction_factor
        self.adapter_layer_norm = adapter_layer_norm
        self.adapter_reuse_layer_norm = adapter_reuse_layer_norm
        self.ln_before_adapter = ln_before_adapter
        self.languages = list(languages)
        self.default_language = default_language


class XmodOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
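
# A minimal usage sketch (illustrative): `languages` lists the per-language
# adapter sets the model will allocate, and `default_language` selects one
# when no language ids are passed at runtime.
if __name__ == "__main__":
    config = XmodConfig(languages=("en_XX", "de_DE"), default_language="en_XX")
    print(config.languages)                 # ['en_XX', 'de_DE']
    print(config.adapter_reduction_factor)  # 2 (adapter bottleneck ratio)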
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SWITCH_TRANSFORMERS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'google/switch-base-8': 'https://huggingface.co/google/switch-base-8/blob/main/config.json',
}
class SwitchTransformersConfig(PretrainedConfig):
    model_type = "switch_transformers"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}

    def __init__(self, vocab_size=32128, d_model=768, d_kv=64, d_ff=2048, expert_capacity=64, num_layers=12, num_sparse_encoder_layers=3, num_decoder_layers=12, num_sparse_decoder_layers=3, num_heads=12, num_experts=8, router_bias=False, router_jitter_noise=0.01, router_dtype="float32", router_ignore_padding_tokens=False, relative_attention_num_buckets=32, relative_attention_max_distance=128, dropout_rate=0.1, layer_norm_epsilon=1e-6, router_z_loss_coef=0.001, router_aux_loss_coef=0.001, initializer_factor=1.0, feed_forward_proj="relu", is_encoder_decoder=True, add_router_probs=False, use_cache=True, pad_token_id=0, eos_token_id=1, **kwargs):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff

        self.num_sparse_encoder_layers = num_sparse_encoder_layers

        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_sparse_decoder_layers = num_sparse_decoder_layers

        # This tells us, each how many encoder layers we'll have to set a sparse layer.
        if self.num_sparse_encoder_layers > 0:
            self.encoder_sparse_step = self.num_layers // self.num_sparse_encoder_layers
        else:
            self.encoder_sparse_step = self.num_layers  # HACK: this will create 0 sparse layers

        # This tells us, each how many decoder layers we'll have to set a sparse layer.
        if self.num_sparse_decoder_layers > 0:
            self.decoder_sparse_step = self.num_decoder_layers // self.num_sparse_decoder_layers
        else:
            self.decoder_sparse_step = self.num_decoder_layers  # HACK: this will create 0 sparse layers

        self.num_heads = num_heads
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        self.router_jitter_noise = router_jitter_noise
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(f"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}")
        self.router_dtype = router_dtype

        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance

        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache
        self.add_router_probs = add_router_probs

        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef

        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"

        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer. "
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'"
            )

        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"

        super().__init__(pad_token_id=pad_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, **kwargs)
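
# A quick sanity sketch of the sparse-step arithmetic above: with 12 encoder
# layers and 3 sparse layers, every 4th block is a sparse (MoE) block.
if __name__ == "__main__":
    config = SwitchTransformersConfig(num_layers=12, num_sparse_encoder_layers=3)
    print(config.encoder_sparse_step)  # 4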
import math
from typing import Any, Callable, List, Optional, Tuple, Union
import numpy as np
import torch
from ...models import T5FilmDecoder
from ...schedulers import DDPMScheduler
from ...utils import is_onnx_available, logging, randn_tensor
if is_onnx_available():
from ..onnx_utils import OnnxRuntimeModel
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
from .continous_encoder import SpectrogramContEncoder
from .notes_encoder import SpectrogramNotesEncoder
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

TARGET_FEATURE_LENGTH = 256


class SpectrogramDiffusionPipeline(DiffusionPipeline):
    """Pipeline for unconditional audio generation with spectrogram diffusion."""

    _optional_components = ["melgan"]

    def __init__(self, notes_encoder: SpectrogramNotesEncoder, continuous_encoder: SpectrogramContEncoder, decoder: T5FilmDecoder, scheduler: DDPMScheduler, melgan: OnnxRuntimeModel if is_onnx_available() else Any) -> None:
        super().__init__()

        # From MELGAN
        self.min_value = math.log(1e-5)  # Matches MelGAN training.
        self.max_value = 4.0  # Largest value for most examples
        self.n_dims = 128

        self.register_modules(notes_encoder=notes_encoder, continuous_encoder=continuous_encoder, decoder=decoder, scheduler=scheduler, melgan=melgan)

    def scale_features(self, features, output_range=(-1.0, 1.0), clip=False):
        """Linearly scale features from [min_value, max_value] to output_range."""
        min_out, max_out = output_range
        if clip:
            features = torch.clip(features, self.min_value, self.max_value)
        # Scale to [0, 1].
        zero_one = (features - self.min_value) / (self.max_value - self.min_value)
        # Scale to [min_out, max_out].
        return zero_one * (max_out - min_out) + min_out

    def scale_to_features(self, outputs, input_range=(-1.0, 1.0), clip=False):
        """Invert by linearly scaling model outputs back to the features range."""
        min_out, max_out = input_range
        outputs = torch.clip(outputs, min_out, max_out) if clip else outputs
        # Scale to [0, 1].
        zero_one = (outputs - min_out) / (max_out - min_out)
        # Scale to [self.min_value, self.max_value].
        return zero_one * (self.max_value - self.min_value) + self.min_value

    def encode(self, input_tokens, continuous_inputs, continuous_mask):
        tokens_mask = input_tokens > 0
        tokens_encoded, tokens_mask = self.notes_encoder(encoder_input_tokens=input_tokens, encoder_inputs_mask=tokens_mask)

        continuous_encoded, continuous_mask = self.continuous_encoder(encoder_inputs=continuous_inputs, encoder_inputs_mask=continuous_mask)

        return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)]

    def decode(self, encodings_and_masks, input_tokens, noise_time):
        timesteps = noise_time
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=input_tokens.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(input_tokens.device)

        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
        timesteps = timesteps * torch.ones(input_tokens.shape[0], dtype=timesteps.dtype, device=timesteps.device)

        logits = self.decoder(encodings_and_masks=encodings_and_masks, decoder_input_tokens=input_tokens, decoder_noise_time=timesteps)
        return logits

    @torch.no_grad()
    def __call__(self, input_tokens: List[List[int]], generator: Optional[torch.Generator] = None, num_inference_steps: int = 100, return_dict: bool = True, output_type: str = "numpy", callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1) -> Union[AudioPipelineOutput, Tuple]:
        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )

        pred_mel = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims], dtype=np.float32)
        full_pred_mel = np.zeros([1, 0, self.n_dims], np.float32)
        ones = torch.ones((1, TARGET_FEATURE_LENGTH), dtype=bool, device=self.device)

        for i, encoder_input_tokens in enumerate(input_tokens):
            if i == 0:
                encoder_continuous_inputs = torch.from_numpy(pred_mel[:1].copy()).to(device=self.device, dtype=self.decoder.dtype)
                # The first chunk has no previous context.
                encoder_continuous_mask = torch.zeros((1, TARGET_FEATURE_LENGTH), dtype=bool, device=self.device)
            else:
                # The full song pipeline does not feed in a context feature, so the mask
                # will be all 0s after the feature converter. Because we know we're
                # feeding in a full context chunk from the previous prediction, set it
                # to all 1s.
                encoder_continuous_mask = ones

            encoder_continuous_inputs = self.scale_features(encoder_continuous_inputs, output_range=[-1.0, 1.0], clip=True)

            encodings_and_masks = self.encode(input_tokens=torch.IntTensor([encoder_input_tokens]).to(device=self.device), continuous_inputs=encoder_continuous_inputs, continuous_mask=encoder_continuous_mask)

            # Sample encoder_continuous_inputs shaped gaussian noise to begin loop
            x = randn_tensor(shape=encoder_continuous_inputs.shape, generator=generator, device=self.device, dtype=self.decoder.dtype)

            # set step values
            self.scheduler.set_timesteps(num_inference_steps)

            # Denoising diffusion loop
            for j, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
                output = self.decode(encodings_and_masks=encodings_and_masks, input_tokens=x, noise_time=t / self.scheduler.config.num_train_timesteps)

                # Compute previous output: x_t -> x_t-1
                x = self.scheduler.step(output, t, x, generator=generator).prev_sample

            mel = self.scale_to_features(x, input_range=[-1.0, 1.0])
            encoder_continuous_inputs = mel[:1]
            pred_mel = mel.cpu().float().numpy()

            full_pred_mel = np.concatenate([full_pred_mel, pred_mel[:1]], axis=1)

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, full_pred_mel)

            logger.info("Generated segment", i)

        if output_type == "numpy" and not is_onnx_available():
            raise ValueError(
                "Cannot return output in 'np' format if ONNX is not available. Make sure to have ONNX installed or set 'output_type' to 'mel'."
            )
        elif output_type == "numpy" and self.melgan is None:
            raise ValueError(
                "Cannot return output in 'np' format if melgan component is not defined. Make sure to define `self.melgan` or set 'output_type' to 'mel'."
            )

        if output_type == "numpy":
            output = self.melgan(input_features=full_pred_mel.astype(np.float32))
        else:
            output = full_pred_mel

        if not return_dict:
            return (output,)

        return AudioPipelineOutput(audios=output)
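
# A sanity sketch of the scaling pair above: scale_features maps
# [min_value, max_value] affinely onto output_range, and scale_to_features is
# its exact inverse (standalone restatement of the same two affine maps).
if __name__ == "__main__":
    _min_value, _max_value = math.log(1e-5), 4.0
    features = torch.linspace(_min_value, _max_value, steps=5)
    zero_one = (features - _min_value) / (_max_value - _min_value)
    scaled = zero_one * 2.0 - 1.0  # output_range=(-1.0, 1.0)
    recovered = (scaled + 1.0) / 2.0 * (_max_value - _min_value) + _min_value
    assert torch.allclose(recovered, features, atol=1e-6)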
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
logger = logging.get_logger(__name__)


if is_vision_available():
    import PIL


class CLIPImageProcessor(BaseImageProcessor):
    r"""Constructs a CLIP image processor."""

    model_input_names = ["pixel_values"]

    def __init__(self, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BICUBIC, do_center_crop: bool = True, crop_size: Dict[str, int] = None, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, do_convert_rgb: bool = True, **kwargs) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(self, images: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_center_crop: bool = None, crop_size: int = None, do_rescale: bool = None, rescale_factor: float = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, do_convert_rgb: bool = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: Optional[ChannelDimension] = ChannelDimension.FIRST, **kwargs) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name="size", default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
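
# A minimal usage sketch (illustrative): with the defaults above, the
# processor resizes the shorter edge to 224, center-crops to 224x224,
# rescales to [0, 1] and normalizes with the OpenAI CLIP statistics.
if __name__ == "__main__":
    from PIL import Image

    processor = CLIPImageProcessor()
    image = Image.fromarray(np.zeros((480, 640, 3), dtype=np.uint8))
    batch = processor(images=image, return_tensors="np")
    print(batch["pixel_values"].shape)  # (1, 3, 224, 224)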
ROMAN = [
    (1000, "M"),
    (900, "CM"),
    (500, "D"),
    (400, "CD"),
    (100, "C"),
    (90, "XC"),
    (50, "L"),
    (40, "XL"),
    (10, "X"),
    (9, "IX"),
    (5, "V"),
    (4, "IV"),
    (1, "I"),
]


def roman_to_int(roman: str) -> int:
    """
    Convert a Roman numeral to an integer, e.g. roman_to_int("III") == 3.
    """
    vals = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}
    total = 0
    place = 0
    while place < len(roman):
        if (place + 1 < len(roman)) and (vals[roman[place]] < vals[roman[place + 1]]):
            total += vals[roman[place + 1]] - vals[roman[place]]
            place += 2
        else:
            total += vals[roman[place]]
            place += 1
    return total


def int_to_roman(number: int) -> str:
    """
    Convert an integer to a Roman numeral, e.g. int_to_roman(3) == "III".
    """
    result = []
    for arabic, roman in ROMAN:
        (factor, number) = divmod(number, arabic)
        result.append(roman * factor)
        if number == 0:
            break
    return "".join(result)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
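
# Round-trip sanity check for the two converters above:
#
#     >>> int_to_roman(49)
#     'XLIX'
#     >>> roman_to_int("XLIX")
#     49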
import os
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE
class BaseCompressedFileFileSystem(AbstractArchiveFileSystem):
    """Read contents of a compressed file as a filesystem with one file inside."""

    root_marker = ""
    protocol: str = (
        None  # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
    )
    compression: str = None  # compression type in fsspec. ex: "gzip"
    extension: str = None  # extension of the filename to strip. ex: ".gz" to get file.txt from file.txt.gz

    def __init__(self, fo: str = "", target_protocol: Optional[str] = None, target_options: Optional[dict] = None, **kwargs):
        super().__init__(self, **kwargs)
        # always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
        self.file = fsspec.open(
            fo,
            mode="rb",
            protocol=target_protocol,
            compression=self.compression,
            client_kwargs={
                "requote_redirect_url": False,  # see https://github.com/huggingface/datasets/pull/5459
                "trust_env": True,  # Enable reading proxy env variables.
                **(target_options or {}).pop("client_kwargs", {}),  # To avoid issues if it was already passed.
            },
            **(target_options or {}),
        )
        self.compressed_name = os.path.basename(self.file.path.split("::")[0])
        self.uncompressed_name = (
            self.compressed_name[: self.compressed_name.rindex(".")]
            if "." in self.compressed_name
            else self.compressed_name
        )
        self.dir_cache = None

    @classmethod
    def _strip_protocol(cls, path):
        # compressed file paths are always relative to the archive root
        return super()._strip_protocol(path).lstrip("/")

    def _get_dirs(self):
        if self.dir_cache is None:
            f = {**self.file.fs.info(self.file.path), "name": self.uncompressed_name}
            self.dir_cache = {f["name"]: f}

    def cat(self, path: str):
        return self.file.open().read()

    def _open(self, path: str, mode: str = "rb", block_size=None, autocommit=True, cache_options=None, **kwargs):
        path = self._strip_protocol(path)
        if mode != "rb":
            raise ValueError(f"Tried to read with mode {mode} on file {self.file.path} opened with mode 'rb'")
        return self.file.open()


class Bz2FileSystem(BaseCompressedFileFileSystem):
    """Read contents of BZ2 file as a filesystem with one file inside."""

    protocol = "bz2"
    compression = "bz2"
    extension = ".bz2"


class GzipFileSystem(BaseCompressedFileFileSystem):
    """Read contents of GZIP file as a filesystem with one file inside."""

    protocol = "gzip"
    compression = "gzip"
    extension = ".gz"


class Lz4FileSystem(BaseCompressedFileFileSystem):
    """Read contents of LZ4 file as a filesystem with one file inside."""

    protocol = "lz4"
    compression = "lz4"
    extension = ".lz4"


class XzFileSystem(BaseCompressedFileFileSystem):
    """Read contents of .xz (LZMA) file as a filesystem with one file inside."""

    protocol = "xz"
    compression = "xz"
    extension = ".xz"


class ZstdFileSystem(BaseCompressedFileFileSystem):
    """Read contents of .zst file as a filesystem with one file inside."""

    protocol = "zstd"
    compression = "zstd"
    extension = ".zst"

    def __init__(self, fo: str, mode: str = "rb", target_protocol: Optional[str] = None, target_options: Optional[dict] = None, block_size: int = DEFAULT_BLOCK_SIZE, **kwargs):
        super().__init__(fo=fo, mode=mode, target_protocol=target_protocol, target_options=target_options, block_size=block_size, **kwargs)
        # We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
        #
        # File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
        #     out.close = close
        # AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
        #
        # see https://github.com/intake/filesystem_spec/issues/725
        _enter = self.file.__enter__

        class WrappedFile:
            def __init__(self, file_):
                self._file = file_

            def __enter__(self):
                self._file.__enter__()
                return self

            def __exit__(self, *args, **kwargs):
                self._file.__exit__(*args, **kwargs)

            def __iter__(self):
                return iter(self._file)

            def __next__(self):
                return next(self._file)

            def __getattr__(self, attr):
                return getattr(self._file, attr)

        def fixed_enter(*args, **kwargs):
            return WrappedFile(_enter(*args, **kwargs))

        self.file.__enter__ = fixed_enter
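
# A minimal usage sketch (illustrative; assumes a local "foo.txt.gz" exists):
# the archive is exposed as a one-file filesystem whose member keeps the name
# without the ".gz" extension.
if __name__ == "__main__":
    fs = GzipFileSystem(fo="foo.txt.gz")  # hypothetical local archive
    print(fs.uncompressed_name)  # "foo.txt"
    print(fs.cat("foo.txt"))     # decompressed bytes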
from typing import List, Optional, Union
import numpy as np
from ....audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ....feature_extraction_sequence_utils import SequenceFeatureExtractor
from ....feature_extraction_utils import BatchFeature
from ....file_utils import PaddingStrategy, TensorType
from ....utils import logging
logger = logging.get_logger(__name__)


class MCTCTFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_features", "attention_mask"]

    def __init__(self, feature_size=80, sampling_rate=16000, padding_value=0.0, hop_length=10, win_length=25, win_function="hamming_window", frame_signal_scale=32768.0, preemphasis_coeff=0.97, mel_floor=1.0, normalize_means=True, normalize_vars=True, return_attention_mask=False, **kwargs):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)

        self.feature_size = feature_size
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.hop_length = hop_length
        self.win_length = win_length
        self.frame_signal_scale = frame_signal_scale
        self.preemphasis_coeff = preemphasis_coeff
        self.mel_floor = mel_floor
        self.normalize_means = normalize_means
        self.normalize_vars = normalize_vars
        self.win_function = win_function
        self.return_attention_mask = return_attention_mask

        self.sample_size = win_length * sampling_rate // 1000
        self.sample_stride = hop_length * sampling_rate // 1000

        self.n_fft = optimal_fft_length(self.sample_size)
        self.n_freqs = (self.n_fft // 2) + 1

    def _extract_mfsc_features(self, one_waveform: np.ndarray) -> np.ndarray:
        """Extract MFSC features for one waveform vector (unbatched)."""
        if self.win_function == "hamming_window":
            window = window_function(window_length=self.sample_size, name=self.win_function, periodic=False)
        else:
            window = window_function(window_length=self.sample_size, name=self.win_function)

        fbanks = mel_filter_bank(num_frequency_bins=self.n_freqs, num_mel_filters=self.feature_size, min_frequency=0.0, max_frequency=self.sampling_rate / 2.0, sampling_rate=self.sampling_rate)

        msfc_features = spectrogram(one_waveform * self.frame_signal_scale, window=window, frame_length=self.sample_size, hop_length=self.sample_stride, fft_length=self.n_fft, center=False, preemphasis=self.preemphasis_coeff, mel_filters=fbanks, mel_floor=self.mel_floor, log_mel="log")
        return msfc_features.T

    def _normalize_one(self, x, input_length, padding_value):
        # make sure we normalize float32 arrays
        if self.normalize_means:
            mean = x[:input_length].mean(axis=0)
            x = np.subtract(x, mean)
        if self.normalize_vars:
            std = x[:input_length].std(axis=0)
            x = np.divide(x, std)

        if input_length < x.shape[0]:
            x[input_length:] = padding_value

        # make sure array is in float32
        x = x.astype(np.float32)

        return x

    def normalize(self, input_features, attention_mask=None):
        lengths = attention_mask.sum(-1) if attention_mask is not None else [x.shape[0] for x in input_features]
        return [self._normalize_one(x, n, self.padding_value) for x, n in zip(input_features, lengths)]

    def __call__(self, raw_speech, padding=False, max_length=None, truncation=False, pad_to_multiple_of=None, return_attention_mask=None, return_tensors=None, sampling_rate=None, **kwargs):
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the ``sampling_rate`` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float32) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_speech = [raw_speech]

        # extract fbank features
        features = [self._extract_mfsc_features(one_waveform) for one_waveform in raw_speech]

        # convert into correct format for padding
        encoded_inputs = BatchFeature({"input_features": features})

        padded_inputs = self.pad(encoded_inputs, padding=padding, max_length=max_length, truncation=truncation, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=True, **kwargs)
        # make sure list is in array format
        input_features = padded_inputs.get("input_features")
        if isinstance(input_features[0], list):
            padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32) for feature in input_features]

        attention_mask = padded_inputs.get("attention_mask")
        if attention_mask is not None:
            padded_inputs["attention_mask"] = [np.asarray(array, dtype=np.int32) for array in attention_mask]

        if self.normalize_means or self.normalize_vars:
            attention_mask = (
                np.array(attention_mask, dtype=np.int32)
                if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
                and padding
                else None
            )
            padded_inputs["input_features"] = self.normalize(
                padded_inputs["input_features"], attention_mask=attention_mask
            )

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs
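
# A minimal usage sketch (illustrative): one second of 16 kHz audio yields
# about 98 frames of 80 log-mel features with the default 25 ms window and
# 10 ms hop (the exact frame count depends on the spectrogram settings).
if __name__ == "__main__":
    extractor = MCTCTFeatureExtractor()
    speech = np.zeros(16000, dtype=np.float32)
    batch = extractor(speech, sampling_rate=16000, return_tensors="np")
    print(batch["input_features"].shape)  # roughly (1, 98, 80)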
__author__ = "Alexander Joslin"

import operator as op

from .stack import Stack


def dijkstras_two_stack_algorithm(equation: str) -> int:
    """
    Evaluate a fully parenthesized infix expression with Dijkstra's
    two-stack algorithm, e.g. "(5 + 3)" -> 8.
    """
    operators = {"*": op.mul, "/": op.truediv, "+": op.add, "-": op.sub}

    operand_stack: Stack[int] = Stack()
    operator_stack: Stack[str] = Stack()

    for i in equation:
        if i.isdigit():
            # RULE 1
            operand_stack.push(int(i))
        elif i in operators:
            # RULE 2
            operator_stack.push(i)
        elif i == ")":
            # RULE 4
            opr = operator_stack.peek()
            operator_stack.pop()
            num1 = operand_stack.peek()
            operand_stack.pop()
            num2 = operand_stack.peek()
            operand_stack.pop()

            total = operators[opr](num2, num1)
            operand_stack.push(total)

    # RULE 5
    return operand_stack.peek()


if __name__ == "__main__":
    equation = "(5 + ((4 * 2) * (2 + 3)))"
    # answer = 45
    print(f"{equation} = {dijkstras_two_stack_algorithm(equation)}")
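
# A worked trace of the rules above on "(3 + (4 * 5))": digits push 3, 4, 5
# onto the operand stack and '+', '*' onto the operator stack; the first ')'
# pops '*', 5, 4 and pushes 20; the second ')' pops '+', 20, 3 and pushes 23,
# which RULE 5 returns.
#
#     >>> dijkstras_two_stack_algorithm("(3 + (4 * 5))")
#     23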
import glob
import os
import random
from string import ascii_lowercase, digits

import cv2
import numpy as np

# Parameters
OUTPUT_SIZE = (720, 1280)  # Height, Width
SCALE_RANGE = (0.4, 0.6)  # if height or width lower than this scale, drop it.
FILTER_TINY_SCALE = 1 / 100
LABEL_DIR = ""
IMG_DIR = ""
OUTPUT_DIR = ""
NUMBER_IMAGES = 250


def main() -> None:
    """
    Get images list and annotations list from the input dir, build mosaic
    images and annotations, and save them to the output dir.
    """
    img_paths, annos = get_dataset(LABEL_DIR, IMG_DIR)
    for index in range(NUMBER_IMAGES):
        idxs = random.sample(range(len(annos)), 4)
        new_image, new_annos, path = update_image_and_anno(img_paths, annos, idxs, OUTPUT_SIZE, SCALE_RANGE, filter_scale=FILTER_TINY_SCALE)

        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(32)
        file_name = path.split(os.sep)[-1].rsplit(".", 1)[0]
        file_root = f"{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}"
        cv2.imwrite(f"{file_root}.jpg", new_image, [cv2.IMWRITE_JPEG_QUALITY, 85])
        print(f"Succeeded {index + 1}/{NUMBER_IMAGES} with {file_name}")
        annos_list = []
        for anno in new_annos:
            width = anno[3] - anno[1]
            height = anno[4] - anno[2]
            x_center = anno[1] + width / 2
            y_center = anno[2] + height / 2
            obj = f"{anno[0]} {x_center} {y_center} {width} {height}"
            annos_list.append(obj)
        with open(f"{file_root}.txt", "w") as outfile:
            outfile.write("\n".join(line for line in annos_list))


def get_dataset(label_dir: str, img_dir: str) -> tuple[list, list]:
    """
    Read YOLO-format label files from label_dir and return the matching image
    paths and per-image lists of [class, xmin, ymin, xmax, ymax] boxes.
    """
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir, "*.txt")):
        label_name = label_file.split(os.sep)[-1].rsplit(".", 1)[0]
        with open(label_file) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir, f"{label_name}.jpg")

        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip("\n").split(" ")
            xmin = float(obj[1]) - float(obj[3]) / 2
            ymin = float(obj[2]) - float(obj[4]) / 2
            xmax = float(obj[1]) + float(obj[3]) / 2
            ymax = float(obj[2]) + float(obj[4]) / 2

            boxes.append([int(obj[0]), xmin, ymin, xmax, ymax])
        if not boxes:
            continue
        img_paths.append(img_path)
        labels.append(boxes)
    return img_paths, labels


def update_image_and_anno(all_img_list: list, all_annos: list, idxs: list[int], output_size: tuple[int, int], scale_range: tuple[float, float], filter_scale: float = 0.0) -> tuple[list, list, str]:
    """
    Tile four randomly chosen images into one mosaic canvas of `output_size`
    and rescale their annotations accordingly.
    """
    output_img = np.zeros([output_size[0], output_size[1], 3], dtype=np.uint8)
    scale_x = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    scale_y = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    divid_point_x = int(scale_x * output_size[1])
    divid_point_y = int(scale_y * output_size[0])

    new_anno = []
    path_list = []
    for i, index in enumerate(idxs):
        path = all_img_list[index]
        path_list.append(path)
        img_annos = all_annos[index]
        img = cv2.imread(path)
        if i == 0:  # top-left
            img = cv2.resize(img, (divid_point_x, divid_point_y))
            output_img[:divid_point_y, :divid_point_x, :] = img
            for bbox in img_annos:
                xmin = bbox[1] * scale_x
                ymin = bbox[2] * scale_y
                xmax = bbox[3] * scale_x
                ymax = bbox[4] * scale_y
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        elif i == 1:  # top-right
            img = cv2.resize(img, (output_size[1] - divid_point_x, divid_point_y))
            output_img[:divid_point_y, divid_point_x : output_size[1], :] = img
            for bbox in img_annos:
                xmin = scale_x + bbox[1] * (1 - scale_x)
                ymin = bbox[2] * scale_y
                xmax = scale_x + bbox[3] * (1 - scale_x)
                ymax = bbox[4] * scale_y
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        elif i == 2:  # bottom-left
            img = cv2.resize(img, (divid_point_x, output_size[0] - divid_point_y))
            output_img[divid_point_y : output_size[0], :divid_point_x, :] = img
            for bbox in img_annos:
                xmin = bbox[1] * scale_x
                ymin = scale_y + bbox[2] * (1 - scale_y)
                xmax = bbox[3] * scale_x
                ymax = scale_y + bbox[4] * (1 - scale_y)
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        else:  # bottom-right
            img = cv2.resize(img, (output_size[1] - divid_point_x, output_size[0] - divid_point_y))
            output_img[divid_point_y : output_size[0], divid_point_x : output_size[1], :] = img
            for bbox in img_annos:
                xmin = scale_x + bbox[1] * (1 - scale_x)
                ymin = scale_y + bbox[2] * (1 - scale_y)
                xmax = scale_x + bbox[3] * (1 - scale_x)
                ymax = scale_y + bbox[4] * (1 - scale_y)
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])

    # Remove bounding boxes smaller than the filter scale
    if filter_scale > 0:
        new_anno = [
            anno
            for anno in new_anno
            if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2])
        ]

    return output_img, new_anno, path_list[0]


def random_chars(number_char: int) -> str:
    """
    Generate a random string of `number_char` lowercase letters and digits.
    """
    assert number_char > 1, "The number of character should greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code) for _ in range(number_char))


if __name__ == "__main__":
    main()
    print("DONE ✅")
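
# A quick check of the top-left quadrant's coordinate math above (boxes are in
# relative [0, 1] coordinates, so scaling by (scale_x, scale_y) maps them onto
# the mosaic canvas):
#
#     scale_x = scale_y = 0.5
#     bbox = [0, 0.2, 0.2, 0.6, 0.6]          # class, xmin, ymin, xmax, ymax
#     [bbox[1] * scale_x, bbox[2] * scale_y]  # -> [0.1, 0.1] on the mosaic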
import argparse
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_dummies.py
PATH_TO_DIFFUSERS = "src/diffusers"

# Matches is_xxx_available()
_re_backend = re.compile(r"is\_([a-z_]*)_available\(\)")
# Matches from xxx import bla
_re_single_line_import = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")


DUMMY_CONSTANT = """
{0} = None
"""

DUMMY_CLASS = """
class {0}(metaclass=DummyObject):
    _backends = {1}

    def __init__(self, *args, **kwargs):
        requires_backends(self, {1})

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, {1})

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, {1})
"""


DUMMY_FUNCTION = """
def {0}(*args, **kwargs):
    requires_backends({0}, {1})
"""


def find_backend(line):
    """Find one (or multiple) backend in a code line of the init."""
    backends = _re_backend.findall(line)
    if len(backends) == 0:
        return None

    return "_and_".join(backends)


def read_init():
    """Read the init and extract backend-specific objects."""
    with open(os.path.join(PATH_TO_DIFFUSERS, "__init__.py"), "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Get to the point we do the actual imports for type checking
    line_index = 0
    backend_specific_objects = {}
    # Go through the end of the file
    while line_index < len(lines):
        # If the line contains is_backend_available, we grab all objects associated with the `else` block
        backend = find_backend(lines[line_index])
        if backend is not None:
            while not lines[line_index].startswith("else:"):
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while line_index < len(lines) and len(lines[line_index]) > 1:
                line = lines[line_index]
                single_line_import_search = _re_single_line_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 8):
                    objects.append(line[8:-2])
                line_index += 1

            if len(objects) > 0:
                backend_specific_objects[backend] = objects
        else:
            line_index += 1

    return backend_specific_objects


def create_dummy_object(name, backend_name):
    """Create the code for the dummy object corresponding to `name`."""
    if name.isupper():
        return DUMMY_CONSTANT.format(name)
    elif name.islower():
        return DUMMY_FUNCTION.format(name, backend_name)
    else:
        return DUMMY_CLASS.format(name, backend_name)


def create_dummy_files(backend_specific_objects=None):
    """Create the content of the dummy files."""
    if backend_specific_objects is None:
        backend_specific_objects = read_init()
    # For special correspondence backend to module name as used in the function requires_modulename
    dummy_files = {}

    for backend, objects in backend_specific_objects.items():
        backend_name = "[" + ", ".join(f'"{b}"' for b in backend.split("_and_")) + "]"
        dummy_file = "# This file is autogenerated by the command `make fix-copies`, do not edit.\n"
        dummy_file += "from ..utils import DummyObject, requires_backends\n\n"
        dummy_file += "\n".join([create_dummy_object(o, backend_name) for o in objects])
        dummy_files[backend] = dummy_file

    return dummy_files


def check_dummies(overwrite=False):
    """Check if the dummy files are up to date and maybe `overwrite` with the right content."""
    dummy_files = create_dummy_files()
    # For special correspondence backend to shortcut as used in utils/dummy_xxx_objects.py
    short_names = {"torch": "pt"}

    # Locate actual dummy modules and read their content.
    path = os.path.join(PATH_TO_DIFFUSERS, "utils")
    dummy_file_paths = {
        backend: os.path.join(path, f"dummy_{short_names.get(backend, backend)}_objects.py")
        for backend in dummy_files.keys()
    }

    actual_dummies = {}
    for backend, file_path in dummy_file_paths.items():
        if os.path.isfile(file_path):
            with open(file_path, "r", encoding="utf-8", newline="\n") as f:
                actual_dummies[backend] = f.read()
        else:
            actual_dummies[backend] = ""

    for backend in dummy_files.keys():
        if dummy_files[backend] != actual_dummies[backend]:
            if overwrite:
                print(
                    f"Updating diffusers.utils.dummy_{short_names.get(backend, backend)}_objects.py as the main "
                    "__init__ has new objects."
                )
                with open(dummy_file_paths[backend], "w", encoding="utf-8", newline="\n") as f:
                    f.write(dummy_files[backend])
            else:
                raise ValueError(
                    "The main __init__ has objects that are not present in "
                    f"diffusers.utils.dummy_{short_names.get(backend, backend)}_objects.py. Run `make fix-copies` "
                    "to fix this."
                )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_dummies(args.fix_and_overwrite)
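
# For reference, a class entry run through DUMMY_CLASS for the "torch" backend
# renders like this (illustrative):
#
#     class UNet2DModel(metaclass=DummyObject):
#         _backends = ["torch"]
#
#         def __init__(self, *args, **kwargs):
#             requires_backends(self, ["torch"])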
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image


if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("9.1.0"):
    PIL_INTERPOLATION = {
        "linear": PIL.Image.Resampling.BILINEAR,
        "bilinear": PIL.Image.Resampling.BILINEAR,
        "bicubic": PIL.Image.Resampling.BICUBIC,
        "lanczos": PIL.Image.Resampling.LANCZOS,
        "nearest": PIL.Image.Resampling.NEAREST,
    }
else:
    PIL_INTERPOLATION = {
        "linear": PIL.Image.LINEAR,
        "bilinear": PIL.Image.BILINEAR,
        "bicubic": PIL.Image.BICUBIC,
        "lanczos": PIL.Image.LANCZOS,
        "nearest": PIL.Image.NEAREST,
    }


def pt_to_pil(images):
    """
    Convert a torch image in [-1, 1] to a list of PIL images.
    """
    images = (images / 2 + 0.5).clamp(0, 1)
    images = images.cpu().permute(0, 2, 3, 1).float().numpy()
    images = numpy_to_pil(images)
    return images


def numpy_to_pil(images):
    """
    Convert a numpy image or a batch of images to a PIL image.
    """
    if images.ndim == 3:
        images = images[None, ...]
    images = (images * 255).round().astype("uint8")
    if images.shape[-1] == 1:
        # special case for grayscale (single channel) images
        pil_images = [Image.fromarray(image.squeeze(), mode="L") for image in images]
    else:
        pil_images = [Image.fromarray(image) for image in images]

    return pil_images
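
# A minimal usage sketch (illustrative; torch is assumed available, as in the
# pipelines that call these helpers):
if __name__ == "__main__":
    import torch

    images = torch.zeros(2, 3, 64, 64)  # two mid-gray 64x64 RGB images in [-1, 1]
    pils = pt_to_pil(images)
    print(len(pils), pils[0].size)  # 2 (64, 64)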
import gc
import unittest

import numpy as np
import torch
from torch.backends.cuda import sdp_kernel

from diffusers import (
    CMStochasticIterativeScheduler,
    ConsistencyModelPipeline,
    UNet2DModel,
)
from diffusers.utils import randn_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_2, require_torch_gpu

from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin


enable_full_determinism()
class ConsistencyModelPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = ConsistencyModelPipeline
    params = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
    batch_params = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS

    # Override required_optional_params to remove num_images_per_prompt
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "generator",
            "latents",
            "output_type",
            "return_dict",
            "callback",
            "callback_steps",
        ]
    )

    @property
    def dummy_uncond_unet(self):
        unet = UNet2DModel.from_pretrained(
            "diffusers/consistency-models-test",
            subfolder="test_unet",
        )
        return unet

    @property
    def dummy_cond_unet(self):
        unet = UNet2DModel.from_pretrained(
            "diffusers/consistency-models-test",
            subfolder="test_unet_class_cond",
        )
        return unet

    def get_dummy_components(self, class_cond=False):
        if class_cond:
            unet = self.dummy_cond_unet
        else:
            unet = self.dummy_uncond_unet

        # Default to CM multistep sampler
        scheduler = CMStochasticIterativeScheduler(num_train_timesteps=40, sigma_min=0.002, sigma_max=80.0)

        components = {
            "unet": unet,
            "scheduler": scheduler,
        }

        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        inputs = {
            "batch_size": 1,
            "num_inference_steps": None,
            "timesteps": [22, 0],
            "generator": generator,
            "output_type": "np",
        }

        return inputs

    def test_consistency_model_pipeline_multistep(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.3572, 0.6273, 0.4031, 0.3961, 0.4321, 0.5730, 0.5266, 0.4780, 0.5004])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_consistency_model_pipeline_multistep_class_cond(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components(class_cond=True)
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["class_labels"] = 0
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.3572, 0.6273, 0.4031, 0.3961, 0.4321, 0.5730, 0.5266, 0.4780, 0.5004])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_consistency_model_pipeline_onestep(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5004, 0.5004, 0.4994, 0.5008, 0.4976, 0.5018, 0.4990, 0.4982, 0.4987])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_consistency_model_pipeline_onestep_class_cond(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components(class_cond=True)
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        inputs["class_labels"] = 0
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5004, 0.5004, 0.4994, 0.5008, 0.4976, 0.5018, 0.4990, 0.4982, 0.4987])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3


@slow
@require_torch_gpu
class ConsistencyModelPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, seed=0, get_fixed_latents=False, device="cpu", dtype=torch.float32, shape=(1, 3, 64, 64)):
        generator = torch.manual_seed(seed)

        inputs = {
            "num_inference_steps": None,
            "timesteps": [22, 0],
            "class_labels": 0,
            "generator": generator,
            "output_type": "np",
        }

        if get_fixed_latents:
            latents = self.get_fixed_latents(seed=seed, device=device, dtype=dtype, shape=shape)
            inputs["latents"] = latents

        return inputs

    def get_fixed_latents(self, seed=0, device="cpu", dtype=torch.float32, shape=(1, 3, 64, 64)):
        if type(device) == str:
            device = torch.device(device)
        generator = torch.Generator(device=device).manual_seed(seed)
        latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        return latents

    def test_consistency_model_cd_multistep(self):
        unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(num_train_timesteps=40, sigma_min=0.002, sigma_max=80.0)
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.0888, 0.0881, 0.0666, 0.0479, 0.0292, 0.0195, 0.0201, 0.0163, 0.0254])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    def test_consistency_model_cd_onestep(self):
        unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(num_train_timesteps=40, sigma_min=0.002, sigma_max=80.0)
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs()
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.0340, 0.0152, 0.0063, 0.0267, 0.0221, 0.0107, 0.0416, 0.0186, 0.0217])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    @require_torch_2
    def test_consistency_model_cd_multistep_flash_attn(self):
        unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(num_train_timesteps=40, sigma_min=0.002, sigma_max=80.0)
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device, torch_dtype=torch.float16)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(get_fixed_latents=True, device=torch_device)

        # Ensure usage of flash attention in torch 2.0
        with sdp_kernel(enable_flash=True, enable_math=False, enable_mem_efficient=False):
            image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.1875, 0.1428, 0.1289, 0.2151, 0.2092, 0.1477, 0.1877, 0.1641, 0.1353])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    @require_torch_2
    def test_consistency_model_cd_onestep_flash_attn(self):
        unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(num_train_timesteps=40, sigma_min=0.002, sigma_max=80.0)
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device, torch_dtype=torch.float16)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(get_fixed_latents=True, device=torch_device)
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None

        # Ensure usage of flash attention in torch 2.0
        with sdp_kernel(enable_flash=True, enable_math=False, enable_mem_efficient=False):
            image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.1663, 0.1948, 0.2275, 0.1680, 0.1204, 0.1245, 0.1858, 0.1338, 0.2095])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
| 201
| 0
|
def one_pence() -> int:
    return 1


def two_pence(x: int) -> int:
    return 0 if x < 0 else two_pence(x - 2) + one_pence()


def five_pence(x: int) -> int:
    return 0 if x < 0 else five_pence(x - 5) + two_pence(x)


def ten_pence(x: int) -> int:
    return 0 if x < 0 else ten_pence(x - 10) + five_pence(x)


def twenty_pence(x: int) -> int:
    return 0 if x < 0 else twenty_pence(x - 20) + ten_pence(x)


def fifty_pence(x: int) -> int:
    return 0 if x < 0 else fifty_pence(x - 50) + twenty_pence(x)


def one_pound(x: int) -> int:
    return 0 if x < 0 else one_pound(x - 100) + fifty_pence(x)


def two_pound(x: int) -> int:
    return 0 if x < 0 else two_pound(x - 200) + one_pound(x)


def solution(x: int = 200) -> int:
    return two_pound(x)


if __name__ == "__main__":
    print(solution(int(input().strip())))
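# A quick sanity check of the mutual recursion above (Project Euler 31):
# each helper fixes how many coins of its denomination to use and hands the
# remainder down to the next smaller coin.
#   solution(5)   -> 4      (5, 2+2+1, 2+1+1+1, 1+1+1+1+1)
#   solution(200) -> 73682  (ways to make 200 pence from UK coins)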
| 221
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple

from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging


if is_sentencepiece_available():
    from .tokenization_big_bird import BigBirdTokenizer
else:
    BigBirdTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/bigbird-roberta-base": "https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model",
        "google/bigbird-roberta-large": (
            "https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model"
        ),
        "google/bigbird-base-trivia-itc": (
            "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model"
        ),
    },
    "tokenizer_file": {
        "google/bigbird-roberta-base": (
            "https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json"
        ),
        "google/bigbird-roberta-large": (
            "https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json"
        ),
        "google/bigbird-base-trivia-itc": (
            "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/bigbird-roberta-base": 4096,
    "google/bigbird-roberta-large": 4096,
    "google/bigbird-base-trivia-itc": 4096,
}

SPIECE_UNDERLINE = "▁"


class BigBirdTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BigBirdTokenizer
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        unk_token="<unk>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        sep_token="[SEP]",
        mask_token="[MASK]",
        cls_token="[CLS]",
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token

        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
| 619
| 0
|
"""simple docstring"""
from __future__ import annotations
from typing import Any
class SCREAMING_SNAKE_CASE__ :
"""simple docstring"""
def __init__( self , snake_case__ = 6 ):
"""simple docstring"""
lowerCAmelCase : List[Any] = None
lowerCAmelCase : Tuple = None
self.create_linked_list(UpperCamelCase__ )
def lowercase__ ( self , snake_case__ ):
"""simple docstring"""
lowerCAmelCase : Dict = Node()
lowerCAmelCase : List[Any] = current_node
lowerCAmelCase : int = current_node
lowerCAmelCase : List[Any] = current_node
for _ in range(1 , UpperCamelCase__ ):
lowerCAmelCase : str = Node()
lowerCAmelCase : Union[str, Any] = current_node
lowerCAmelCase : Tuple = previous_node
lowerCAmelCase : Optional[int] = current_node
lowerCAmelCase : Union[str, Any] = self.front
lowerCAmelCase : Optional[Any] = previous_node
def lowercase__ ( self ):
"""simple docstring"""
return (
self.front == self.rear
and self.front is not None
and self.front.data is None
)
def lowercase__ ( self ):
"""simple docstring"""
self.check_can_perform_operation()
return self.front.data if self.front else None
def lowercase__ ( self , snake_case__ ):
"""simple docstring"""
if self.rear is None:
return
self.check_is_full()
if not self.is_empty():
lowerCAmelCase : Optional[int] = self.rear.next
if self.rear:
lowerCAmelCase : Optional[int] = data
def lowercase__ ( self ):
"""simple docstring"""
self.check_can_perform_operation()
if self.rear is None or self.front is None:
return None
if self.front == self.rear:
lowerCAmelCase : Optional[Any] = self.front.data
lowerCAmelCase : Union[str, Any] = None
return data
lowerCAmelCase : Any = self.front
lowerCAmelCase : Tuple = old_front.next
lowerCAmelCase : Tuple = old_front.data
lowerCAmelCase : List[str] = None
return data
def lowercase__ ( self ):
"""simple docstring"""
if self.is_empty():
raise Exception("Empty Queue" )
def lowercase__ ( self ):
"""simple docstring"""
if self.rear and self.rear.next == self.front:
raise Exception("Full Queue" )
class SCREAMING_SNAKE_CASE__ :
"""simple docstring"""
def __init__( self ):
"""simple docstring"""
lowerCAmelCase : int = None
lowerCAmelCase : str = None
lowerCAmelCase : int = None
if __name__ == "__main__":
import doctest
doctest.testmod()
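# A usage sketch, assuming the classes above:
#   queue = CircularQueueLinkedList(initial_capacity=3)
#   queue.is_empty()   # -> True
#   queue.enqueue("a")
#   queue.enqueue("b")
#   queue.first()      # -> "a"
#   queue.dequeue()    # -> "a"
#   queue.dequeue()    # -> "b"
#   queue.dequeue()    # raises Exception("Empty Queue")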
| 710
|
"""simple docstring"""
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''huggingface/informer-tourism-monthly''': (
'''https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json'''
),
# See all Informer models at https://huggingface.co/models?filter=informer
}
class InformerConfig(PretrainedConfig):
    model_type = "informer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }

    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = None,
        scaling: Optional[Union[str, bool]] = "mean",
        num_dynamic_real_features: int = 0,
        num_static_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_time_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        d_model: int = 64,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        is_encoder_decoder: bool = True,
        activation_function: str = "gelu",
        dropout: float = 0.05,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache=True,
        attention_type: str = "prob",
        sampling_factor: int = 5,
        distil: bool = True,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features

        # set cardinality
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]

        # set embedding_dimension
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]

        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers

        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.activation_function = activation_function
        self.init_std = init_std
        self.use_cache = use_cache

        # Informer
        self.attention_type = attention_type
        self.sampling_factor = sampling_factor
        self.distil = distil

        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
| 681
| 0
|
from __future__ import annotations


def check_polygon(nums: list[float]) -> bool:
    """Return True if the given side lengths can form a polygon in the Euclidean plane."""
    if len(nums) < 2:
        raise ValueError("Monogons and Digons are not polygons in the Euclidean space")
    if any(i <= 0 for i in nums):
        raise ValueError("All values must be greater than 0")
    copy_nums = nums.copy()
    copy_nums.sort()
    return copy_nums[-1] < sum(copy_nums[:-1])


if __name__ == "__main__":
    import doctest

    doctest.testmod()
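# A quick check of the inequality the function encodes: a set of segment
# lengths can close into a polygon iff the longest side is strictly shorter
# than the sum of the others.
#   check_polygon([6, 10, 5])    -> True
#   check_polygon([3, 7, 13, 2]) -> False   (13 >= 3 + 7 + 2)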
| 664
|
from argparse import ArgumentParser
from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands
def main():
    parser = ArgumentParser("Transformers CLI tool", usage="transformers-cli <command> [<args>]")
    commands_parser = parser.add_subparsers(help="transformers-cli command helpers")

    # Register commands
    ConvertCommand.register_subcommand(commands_parser)
    DownloadCommand.register_subcommand(commands_parser)
    EnvironmentCommand.register_subcommand(commands_parser)
    RunCommand.register_subcommand(commands_parser)
    ServeCommand.register_subcommand(commands_parser)
    UserCommands.register_subcommand(commands_parser)
    AddNewModelCommand.register_subcommand(commands_parser)
    AddNewModelLikeCommand.register_subcommand(commands_parser)
    LfsCommands.register_subcommand(commands_parser)
    PTtoTFCommand.register_subcommand(commands_parser)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    service = args.func(args)
    service.run()
if __name__ == "__main__":
main()
| 63
| 0
|
import argparse
import re
from pathlib import Path
import requests
import torch
from PIL import Image
from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor
from transformers import (
EfficientFormerConfig,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerImageProcessor,
)
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def rename_key(old_name, num_meta4D_last_stage):
    new_name = old_name

    if "patch_embed" in old_name:
        _, layer, param = old_name.split(".")

        if layer == "0":
            new_name = old_name.replace("0", "convolution1")
        elif layer == "1":
            new_name = old_name.replace("1", "batchnorm_before")
        elif layer == "3":
            new_name = old_name.replace("3", "convolution2")
        else:
            new_name = old_name.replace("4", "batchnorm_after")

    if "network" in old_name and re.search(r"\d\.\d", old_name):
        two_digit_num = r"\b\d{2}\b"
        if bool(re.search(two_digit_num, old_name)):
            match = re.search(r"\d\.\d\d.", old_name).group()
        else:
            match = re.search(r"\d\.\d.", old_name).group()
        if int(match[0]) < 6:
            trimmed_name = old_name.replace(match, "")
            trimmed_name = trimmed_name.replace("network", match[0] + ".meta4D_layers.blocks." + match[2:-1])
            new_name = "intermediate_stages." + trimmed_name
        else:
            trimmed_name = old_name.replace(match, "")
            if int(match[2]) < num_meta4D_last_stage:
                trimmed_name = trimmed_name.replace("network", "meta4D_layers.blocks." + match[2])
            else:
                layer_index = str(int(match[2]) - num_meta4D_last_stage)
                trimmed_name = trimmed_name.replace("network", "meta3D_layers.blocks." + layer_index)
                if "norm1" in old_name:
                    trimmed_name = trimmed_name.replace("norm1", "layernorm1")
                elif "norm2" in old_name:
                    trimmed_name = trimmed_name.replace("norm2", "layernorm2")
                elif "fc1" in old_name:
                    trimmed_name = trimmed_name.replace("fc1", "linear_in")
                elif "fc2" in old_name:
                    trimmed_name = trimmed_name.replace("fc2", "linear_out")
            new_name = "last_stage." + trimmed_name

    elif "network" in old_name and re.search(r".\d.", old_name):
        new_name = old_name.replace("network", "intermediate_stages")

    if "fc" in new_name:
        new_name = new_name.replace("fc", "convolution")
    elif ("norm1" in new_name) and ("layernorm1" not in new_name):
        new_name = new_name.replace("norm1", "batchnorm_before")
    elif ("norm2" in new_name) and ("layernorm2" not in new_name):
        new_name = new_name.replace("norm2", "batchnorm_after")
    if "proj" in new_name:
        new_name = new_name.replace("proj", "projection")
    if "dist_head" in new_name:
        new_name = new_name.replace("dist_head", "distillation_classifier")
    elif "head" in new_name:
        new_name = new_name.replace("head", "classifier")
    elif "patch_embed" in new_name:
        new_name = "efficientformer." + new_name
    elif new_name == "norm.weight" or new_name == "norm.bias":
        new_name = new_name.replace("norm", "layernorm")
        new_name = "efficientformer." + new_name
    else:
        new_name = "efficientformer.encoder." + new_name

    return new_name
def convert_torch_checkpoint(checkpoint, num_meta4D_last_stage):
    for key in checkpoint.copy().keys():
        val = checkpoint.pop(key)
        checkpoint[rename_key(key, num_meta4D_last_stage)] = val
    return checkpoint
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image
def convert_efficientformer_checkpoint(checkpoint_path, efficientformer_config_file, pytorch_dump_path, push_to_hub):
    orig_state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    config = EfficientFormerConfig.from_json_file(efficientformer_config_file)
    model = EfficientFormerForImageClassificationWithTeacher(config)
    model_name = "_".join(checkpoint_path.split("/")[-1].split(".")[0].split("_")[:-1])

    num_meta4D_last_stage = config.depths[-1] - config.num_meta3d_blocks + 1
    new_state_dict = convert_torch_checkpoint(orig_state_dict, num_meta4D_last_stage)

    model.load_state_dict(new_state_dict)
    model.eval()

    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }

    # prepare image
    image = prepare_img()
    image_size = 256
    crop_size = 224
    processor = EfficientFormerImageProcessor(
        size={"shortest_edge": image_size},
        crop_size={"height": crop_size, "width": crop_size},
        resample=pillow_resamplings["bicubic"],
    )
    pixel_values = processor(images=image, return_tensors="pt").pixel_values

    # original processing pipeline
    image_transforms = Compose(
        [
            Resize(image_size, interpolation=pillow_resamplings["bicubic"]),
            CenterCrop(crop_size),
            ToTensor(),
            Normalize(IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD),
        ]
    )
    original_pixel_values = image_transforms(image).unsqueeze(0)

    assert torch.allclose(original_pixel_values, pixel_values)

    outputs = model(pixel_values)
    logits = outputs.logits

    expected_shape = (1, 1000)

    if "l1" in model_name:
        expected_logits = torch.Tensor(
            [-0.1312, 0.4353, -1.0499, -0.5124, 0.4183, -0.6793, -1.3777, -0.0893, -0.7358, -2.4328]
        )
        assert torch.allclose(logits[0, :10], expected_logits, atol=1e-3)
        assert logits.shape == expected_shape
    elif "l3" in model_name:
        expected_logits = torch.Tensor(
            [-1.3150, -1.5456, -1.2556, -0.8496, -0.7127, -0.7897, -0.9728, -0.3052, 0.3751, -0.3127]
        )
        assert torch.allclose(logits[0, :10], expected_logits, atol=1e-3)
        assert logits.shape == expected_shape
    elif "l7" in model_name:
        expected_logits = torch.Tensor(
            [-1.0283, -1.4131, -0.5644, -1.3115, -0.5785, -1.2049, -0.7528, 0.1992, -0.3822, -0.0878]
        )
        assert logits.shape == expected_shape
    else:
        raise ValueError(
            f"Unknown model checkpoint: {checkpoint_path}. Supported version of efficientformer are l1, l3 and l7"
        )

    # Save Checkpoints
    Path(pytorch_dump_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_path)
    print(f"Checkpoint successfully converted. Model saved at {pytorch_dump_path}")
    processor.save_pretrained(pytorch_dump_path)
    print(f"Processor successfully saved at {pytorch_dump_path}")

    if push_to_hub:
        print("Pushing model to the hub...")

        model.push_to_hub(
            repo_id=f"Bearnardd/{pytorch_dump_path}",
            commit_message="Add model",
            use_temp_dir=True,
        )
        processor.push_to_hub(
            repo_id=f"Bearnardd/{pytorch_dump_path}",
            commit_message="Add image processor",
            use_temp_dir=True,
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--pytorch_model_path''',
default=None,
type=str,
required=True,
help='''Path to EfficientFormer pytorch checkpoint.''',
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The json file for EfficientFormer model config.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Push model and image processor to the hub''')
parser.add_argument(
'''--no-push_to_hub''',
dest='''push_to_hub''',
action='''store_false''',
help='''Do not push model and image processor to the hub''',
)
parser.set_defaults(push_to_hub=True)
    args = parser.parse_args()
convert_efficientformer_checkpoint(
checkpoint_path=args.pytorch_model_path,
efficientformer_config_file=args.config_file,
pytorch_dump_path=args.pytorch_dump_path,
push_to_hub=args.push_to_hub,
)
| 716
|
def price_plus_tax(price: float, tax_rate: float) -> float:
    return price * (1 + tax_rate)
if __name__ == "__main__":
print(f'''{price_plus_tax(100, 0.25) = }''')
print(f'''{price_plus_tax(125.50, 0.05) = }''')
| 527
| 0
|
from typing import Optional
from urllib.parse import quote
import huggingface_hub as hfh
from packaging import version
def hf_hub_url(repo_id: str, path: str, revision: Optional[str] = None) -> str:
    if version.parse(hfh.__version__).release < version.parse("0.11.0").release:
        # old versions of hfh don't url-encode the file path
        path = quote(path)
    return hfh.hf_hub_url(repo_id, path, repo_type="dataset", revision=revision)
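# A usage sketch (the repo and file names below are illustrative, not from
# the original source):
#   url = hf_hub_url("squad", "plain_text/train-00000-of-00001.parquet", revision="main")
# On huggingface_hub < 0.11.0 the file path is percent-encoded first, so old
# and new library versions resolve to the same dataset URL.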
| 568
|
from __future__ import annotations

import math


def minimax(depth: int, node_index: int, is_max: bool, scores: list[int], height: float) -> int:
    """Return the optimal value for the player to move in a perfect binary game tree."""
    if depth < 0:
        raise ValueError("Depth cannot be less than 0")
    if len(scores) == 0:
        raise ValueError("Scores cannot be empty")
    if depth == height:
        return scores[node_index]
    if is_max:
        return max(
            minimax(depth + 1, node_index * 2, False, scores, height),
            minimax(depth + 1, node_index * 2 + 1, False, scores, height),
        )
    return min(
        minimax(depth + 1, node_index * 2, True, scores, height),
        minimax(depth + 1, node_index * 2 + 1, True, scores, height),
    )


def main() -> None:
    scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    height = math.log(len(scores), 2)
    print("Optimal value : ", end="")
    print(minimax(0, 0, True, scores, height))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
| 63
| 0
|
class Node:
    def __init__(self, val):
        self.val = val
        self.left = None
        self.right = None

    def insert(self, val):
        if self.val:
            if val < self.val:
                if self.left is None:
                    self.left = Node(val)
                else:
                    self.left.insert(val)
            elif val > self.val:
                if self.right is None:
                    self.right = Node(val)
                else:
                    self.right.insert(val)
        else:
            self.val = val


def inorder(root, res):
    """Recursive in-order traversal that appends node values to `res`."""
    if root:
        inorder(root.left, res)
        res.append(root.val)
        inorder(root.right, res)


def tree_sort(arr):
    """Sort `arr` by building a binary search tree and reading it back in order."""
    if len(arr) == 0:
        return arr
    root = Node(arr[0])
    for i in range(1, len(arr)):
        root.insert(arr[i])
    # Traverse BST in order.
    res = []
    inorder(root, res)
    return res


if __name__ == "__main__":
    print(tree_sort([10, 1, 3, 2, 9, 14, 13]))
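# Expected behavior of the driver above: an in-order walk of a binary search
# tree visits keys in ascending order, so
#   tree_sort([10, 1, 3, 2, 9, 14, 13]) -> [1, 2, 3, 9, 10, 13, 14]
# Note that duplicate values are silently collapsed by the `else` branch of
# `insert`, so this sort is only faithful for lists of distinct keys.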
| 707
|
import inspect
import unittest
from transformers import YolosConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import YolosForObjectDetection, YolosModel
from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class YolosModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=[30, 30],
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        n_targets=8,
        num_detection_tokens=10,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
        self.n_targets = n_targets
        self.num_detection_tokens = num_detection_tokens
        # we set the expected sequence length (which is used in several tests)
        # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens
        num_patches = (image_size[1] // patch_size) * (image_size[0] // patch_size)
        self.expected_seq_len = num_patches + 1 + self.num_detection_tokens

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]])

        labels = None
        if self.use_labels:
            # labels is a list of Dict (each Dict being the labels for a given example in the batch)
            labels = []
            for i in range(self.batch_size):
                target = {}
                target["class_labels"] = torch.randint(
                    high=self.num_labels, size=(self.n_targets,), device=torch_device
                )
                target["boxes"] = torch.rand(self.n_targets, 4, device=torch_device)
                labels.append(target)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return YolosConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            num_detection_tokens=self.num_detection_tokens,
            num_labels=self.num_labels,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = YolosModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.expected_seq_len, self.hidden_size)
        )

    def create_and_check_for_object_detection(self, config, pixel_values, labels):
        model = YolosForObjectDetection(config)
        model.to(torch_device)
        model.eval()

        result = model(pixel_values=pixel_values)
        result = model(pixel_values)

        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_detection_tokens, self.num_labels + 1))
        self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_detection_tokens, 4))

        result = model(pixel_values=pixel_values, labels=labels)

        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_detection_tokens, self.num_labels + 1))
        self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_detection_tokens, 4))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class YolosModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (YolosModel, YolosForObjectDetection) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": YolosModel, "object-detection": YolosForObjectDetection} if is_torch_available() else {}
    )

    is_encoder_decoder = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    # special case for object detection head model
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "YolosForObjectDetection":
                labels = []
                for i in range(self.model_tester.batch_size):
                    target = {}
                    target["class_labels"] = torch.ones(
                        size=(self.model_tester.n_targets,), device=torch_device, dtype=torch.long
                    )
                    target["boxes"] = torch.ones(
                        self.model_tester.n_targets, 4, device=torch_device, dtype=torch.float
                    )
                    labels.append(target)
                inputs_dict["labels"] = labels

        return inputs_dict
    def setUp(self):
        self.model_tester = YolosModelTester(self)
        self.config_tester = ConfigTester(self, config_class=YolosConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_inputs_embeds(self):
        # YOLOS does not use inputs_embeds
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        # in YOLOS, the seq_len is different
        seq_len = self.model_tester.expected_seq_len
        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, seq_len, seq_len],
            )
            out_len = len(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            added_hidden_states = 1
            self.assertEqual(out_len + added_hidden_states, len(outputs))

            self_attentions = outputs.attentions
            self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, seq_len, seq_len],
            )

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_layers = getattr(
                self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
            )
            self.assertEqual(len(hidden_states), expected_num_layers)

            # YOLOS has a different seq_length
            seq_length = self.model_tester.expected_seq_len

            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [seq_length, self.model_tester.hidden_size],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_object_detection(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_object_detection(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = YolosModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class YolosModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("hustvl/yolos-small") if is_vision_available() else None

    @slow
    def test_inference_object_detection_head(self):
        model = YolosForObjectDetection.from_pretrained("hustvl/yolos-small").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(inputs.pixel_values)

        # verify outputs
        expected_shape = torch.Size((1, 100, 92))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice_logits = torch.tensor(
            [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]],
            device=torch_device,
        )
        expected_slice_boxes = torch.tensor(
            [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]], device=torch_device
        )
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice_logits, atol=1e-4))
        self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3], expected_slice_boxes, atol=1e-4))

        # verify postprocessing
        results = image_processor.post_process_object_detection(
            outputs, threshold=0.3, target_sizes=[image.size[::-1]]
        )[0]
        expected_scores = torch.tensor([0.9994, 0.9790, 0.9964, 0.9972, 0.9861]).to(torch_device)
        expected_labels = [75, 75, 17, 63, 17]
        expected_slice_boxes = torch.tensor([335.0609, 79.3848, 375.4216, 187.2495]).to(torch_device)

        self.assertEqual(len(results["scores"]), 5)
        self.assertTrue(torch.allclose(results["scores"], expected_scores, atol=1e-4))
        self.assertSequenceEqual(results["labels"].tolist(), expected_labels)
        self.assertTrue(torch.allclose(results["boxes"][0, :], expected_slice_boxes))
| 441
| 0
|
def dodecahedron_surface_area(edge: float) -> float:
    if edge <= 0 or not isinstance(edge, int):
        raise ValueError("Length must be a positive.")
    return 3 * ((25 + 10 * (5 ** (1 / 2))) ** (1 / 2)) * (edge**2)


def dodecahedron_volume(edge: float) -> float:
    if edge <= 0 or not isinstance(edge, int):
        raise ValueError("Length must be a positive.")
    return ((15 + (7 * (5 ** (1 / 2)))) / 4) * (edge**3)
if __name__ == "__main__":
import doctest
doctest.testmod()
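# Spot checks of the closed forms above (values rounded to 4 d.p.):
#   dodecahedron_surface_area(1) == 3 * sqrt(25 + 10 * sqrt(5)) ~= 20.6457
#   dodecahedron_volume(1)       == (15 + 7 * sqrt(5)) / 4      ~= 7.6631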
| 454
|
def is_palindrome(num: int) -> bool:
    if num < 0:
        return False

    num_copy = num
    rev_num = 0
    while num > 0:
        rev_num = rev_num * 10 + (num % 10)
        num //= 10

    return num_copy == rev_num
if __name__ == "__main__":
import doctest
doctest.testmod()
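# Quick checks of the digit-reversal loop above:
#   is_palindrome(121)  -> True
#   is_palindrome(123)  -> False
#   is_palindrome(-101) -> False   (negatives are rejected up front)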
| 454
| 1
|
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features


logger = logging.get_logger(__name__)

MODEL_CONFIG_CLASSES = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)


@dataclass
class SquadDataTrainingArguments:
    model_type: str = field(
        default=None, metadata={"help": "Model type selected in the list: " + ", ".join(MODEL_TYPES)}
    )
    data_dir: str = field(
        default=None, metadata={"help": "The input data dir. Should contain the .json files for the SQuAD task."}
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    doc_stride: int = field(
        default=128,
        metadata={"help": "When splitting up a long document into chunks, how much stride to take between chunks."},
    )
    max_query_length: int = field(
        default=64,
        metadata={
            "help": (
                "The maximum number of tokens for the question. Questions longer than this will "
                "be truncated to this length."
            )
        },
    )
    max_answer_length: int = field(
        default=30,
        metadata={
            "help": (
                "The maximum length of an answer that can be generated. This is needed because the start "
                "and end predictions are not conditioned on one another."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    version_2_with_negative: bool = field(
        default=False, metadata={"help": "If true, the SQuAD examples contain some that do not have an answer."}
    )
    null_score_diff_threshold: float = field(
        default=0.0, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."}
    )
    n_best_size: int = field(
        default=20, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."}
    )
    lang_id: int = field(
        default=0,
        metadata={
            "help": (
                "language id of input for language-specific xlm models (see"
                " tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)"
            )
        },
    )
    threads: int = field(default=1, metadata={"help": "multiple threads for converting example to features"})


class Split(Enum):
    train = "train"
    dev = "dev"


class SquadDataset(Dataset):
    args: SquadDataTrainingArguments
    features: List[SquadFeatures]
    mode: Split
    is_language_sensitive: bool

    def __init__(
        self,
        args: SquadDataTrainingArguments,
        tokenizer: PreTrainedTokenizer,
        limit_length: Optional[int] = None,
        mode: Union[str, Split] = Split.train,
        is_language_sensitive: Optional[bool] = False,
        cache_dir: Optional[str] = None,
        dataset_format: Optional[str] = "pt",
    ):
        self.args = args
        self.is_language_sensitive = is_language_sensitive
        self.processor = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor()
        if isinstance(mode, str):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError("mode is not a valid split name")
        self.mode = mode
        # Load data features from cache or dataset file
        version_tag = "v2" if args.version_2_with_negative else "v1"
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir,
            f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}",
        )

        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                start = time.time()
                self.old_features = torch.load(cached_features_file)

                # Legacy cache files have only features, while new cache files
                # will have dataset and examples also.
                self.features = self.old_features["features"]
                self.dataset = self.old_features.get("dataset", None)
                self.examples = self.old_features.get("examples", None)
                logger.info(
                    f"Loading features from cached file {cached_features_file} [took %.3f s]", time.time() - start
                )

                if self.dataset is None or self.examples is None:
                    logger.warning(
                        f"Deleting cached file {cached_features_file} will allow dataset and examples to be cached in"
                        " future run"
                    )
            else:
                if mode == Split.dev:
                    self.examples = self.processor.get_dev_examples(args.data_dir)
                else:
                    self.examples = self.processor.get_train_examples(args.data_dir)

                self.features, self.dataset = squad_convert_examples_to_features(
                    examples=self.examples,
                    tokenizer=tokenizer,
                    max_seq_length=args.max_seq_length,
                    doc_stride=args.doc_stride,
                    max_query_length=args.max_query_length,
                    is_training=mode == Split.train,
                    threads=args.threads,
                    return_dataset=dataset_format,
                )

                start = time.time()
                torch.save(
                    {"features": self.features, "dataset": self.dataset, "examples": self.examples},
                    cached_features_file,
                )
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]"
                )

    def __len__(self):
        return len(self.features)

    def __getitem__(self, i) -> Dict[str, torch.Tensor]:
        # Convert to Tensors and build dataset
        feature = self.features[i]

        input_ids = torch.tensor(feature.input_ids, dtype=torch.long)
        attention_mask = torch.tensor(feature.attention_mask, dtype=torch.long)
        token_type_ids = torch.tensor(feature.token_type_ids, dtype=torch.long)
        cls_index = torch.tensor(feature.cls_index, dtype=torch.long)
        p_mask = torch.tensor(feature.p_mask, dtype=torch.float)
        is_impossible = torch.tensor(feature.is_impossible, dtype=torch.float)

        inputs = {
            "input_ids": input_ids,
            "attention_mask": attention_mask,
            "token_type_ids": token_type_ids,
        }

        if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
            del inputs["token_type_ids"]

        if self.args.model_type in ["xlnet", "xlm"]:
            inputs.update({"cls_index": cls_index, "p_mask": p_mask})
            if self.args.version_2_with_negative:
                inputs.update({"is_impossible": is_impossible})
            if self.is_language_sensitive:
                inputs.update({"langs": (torch.ones(input_ids.shape, dtype=torch.int64) * self.args.lang_id)})

        if self.mode == Split.train:
            start_positions = torch.tensor(feature.start_position, dtype=torch.long)
            end_positions = torch.tensor(feature.end_position, dtype=torch.long)
            inputs.update({"start_positions": start_positions, "end_positions": end_positions})

        return inputs
| 717
|
"""simple docstring"""
import numpy as np
from cva import destroyAllWindows, imread, imshow, waitKey
class _snake_case :
"""simple docstring"""
def __init__( self : int , _A : List[Any] , _A : int , _A : int):
"""simple docstring"""
if dst_width < 0 or dst_height < 0:
raise ValueError("""Destination width/height should be > 0""")
_SCREAMING_SNAKE_CASE : str = img
_SCREAMING_SNAKE_CASE : Optional[Any] = img.shape[1]
_SCREAMING_SNAKE_CASE : Tuple = img.shape[0]
_SCREAMING_SNAKE_CASE : Any = dst_width
_SCREAMING_SNAKE_CASE : Any = dst_height
_SCREAMING_SNAKE_CASE : Any = self.src_w / self.dst_w
_SCREAMING_SNAKE_CASE : Dict = self.src_h / self.dst_h
_SCREAMING_SNAKE_CASE : Optional[Any] = (
np.ones((self.dst_h, self.dst_w, 3) , np.uinta) * 2_5_5
)
def _lowerCAmelCase ( self : Tuple):
"""simple docstring"""
for i in range(self.dst_h):
for j in range(self.dst_w):
_SCREAMING_SNAKE_CASE : Any = self.img[self.get_y(_A)][self.get_x(_A)]
def _lowerCAmelCase ( self : int , _A : int):
"""simple docstring"""
return int(self.ratio_x * x)
def _lowerCAmelCase ( self : str , _A : int):
"""simple docstring"""
return int(self.ratio_y * y)
if __name__ == "__main__":
lowerCAmelCase_ , lowerCAmelCase_ = 800, 600
lowerCAmelCase_ = imread('''image_data/lena.jpg''', 1)
lowerCAmelCase_ = NearestNeighbour(im, dst_w, dst_h)
n.process()
imshow(
F"Image resized from: {im.shape[1]}x{im.shape[0]} to {dst_w}x{dst_h}", n.output
)
waitKey(0)
destroyAllWindows()
| 635
| 0
|
class DisjointSet:
    def __init__(self, set_counts: list) -> None:
        """Initialize with a list of the size of each set, rank 1 for every set."""
        self.set_counts = set_counts
        self.max_set = max(set_counts)
        num_sets = len(set_counts)
        self.ranks = [1] * num_sets
        self.parents = list(range(num_sets))

    def merge(self, src: int, dst: int) -> bool:
        """Merge two sets using union by rank; return False if already joined."""
        src_parent = self.get_parent(src)
        dst_parent = self.get_parent(dst)

        if src_parent == dst_parent:
            return False

        if self.ranks[dst_parent] >= self.ranks[src_parent]:
            self.set_counts[dst_parent] += self.set_counts[src_parent]
            self.set_counts[src_parent] = 0
            self.parents[src_parent] = dst_parent
            if self.ranks[dst_parent] == self.ranks[src_parent]:
                self.ranks[dst_parent] += 1
            joined_set_size = self.set_counts[dst_parent]
        else:
            self.set_counts[src_parent] += self.set_counts[dst_parent]
            self.set_counts[dst_parent] = 0
            self.parents[dst_parent] = src_parent
            joined_set_size = self.set_counts[src_parent]

        self.max_set = max(self.max_set, joined_set_size)
        return True

    def get_parent(self, disj_set: int) -> int:
        """Find the representative of a set, compressing the path as we go."""
        if self.parents[disj_set] == disj_set:
            return disj_set
        self.parents[disj_set] = self.get_parent(self.parents[disj_set])
        return self.parents[disj_set]
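# A usage sketch, assuming the class above (set sizes are illustrative):
#   ds = DisjointSet([1, 1, 1])
#   ds.merge(1, 2)   # -> True, sets {1} and {2} join
#   ds.merge(0, 2)   # -> True, everything is now one set
#   ds.merge(0, 1)   # -> False, already in the same set
#   ds.max_set       # -> 3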
| 61
|
from ....configuration_utils import PretrainedConfig
from ....utils import logging


logger = logging.get_logger(__name__)

# TODO: upload to AWS
RETRIBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "yjernite/retribert-base-uncased": (
        "https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json"
    ),
}


class RetriBertConfig(PretrainedConfig):
    model_type = "retribert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=8,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        share_encoders=True,
        projection_dim=128,
        pad_token_id=0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.share_encoders = share_encoders
        self.projection_dim = projection_dim
| 665
| 0
|
import argparse

from transformers import T5Config, T5ForConditionalGeneration, load_tf_weights_in_t5
from transformers.utils import logging


logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = T5Config.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = T5ForConditionalGeneration(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_t5(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
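# Example invocation (the paths here are illustrative placeholders):
#   python convert_t5_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path /path/to/t5/model.ckpt \
#       --config_file /path/to/t5/config.json \
#       --pytorch_dump_path /path/to/output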
| 353
|
def different_signs(num1: int, num2: int) -> bool:
    """Return True if the two integers have opposite signs."""
    return num1 ^ num2 < 0
if __name__ == "__main__":
import doctest
doctest.testmod()
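# The XOR of two integers is negative exactly when their sign bits differ,
# so a single bitwise operation replaces two comparisons against zero:
#   different_signs(1, -1)   -> True
#   different_signs(1, 1)    -> False
#   different_signs(-2, -7)  -> False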
| 353
| 1
|
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
logger = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class Seq2SeqTrainingArguments(TrainingArguments):
    sortish_sampler: bool = field(default=False, metadata={"help": "Whether to use SortishSampler or not."})
    predict_with_generate: bool = field(
        default=False, metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."}
    )
    generation_max_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `max_length` value of the model configuration."
            )
        },
    )
    generation_num_beams: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `num_beams` value of the model configuration."
            )
        },
    )
    generation_config: Optional[Union[str, Path, GenerationConfig]] = field(
        default=None,
        metadata={
            "help": "Model id, file path or url pointing to a GenerationConfig json file, to use during prediction."
        },
    )

    def to_dict(self):
        d = super().to_dict()
        for k, v in d.items():
            if isinstance(v, GenerationConfig):
                d[k] = v.to_dict()
        return d
| 412
|
from __future__ import annotations


def bucket_sort(my_list: list) -> list:
    if len(my_list) == 0:
        return []
    min_value, max_value = min(my_list), max(my_list)
    bucket_count = int(max_value - min_value) + 1
    buckets: list[list] = [[] for _ in range(bucket_count)]

    for i in my_list:
        buckets[int(i - min_value)].append(i)

    return [v for bucket in buckets for v in sorted(bucket)]


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    assert bucket_sort([4, 5, 3, 2, 1]) == [1, 2, 3, 4, 5]
    assert bucket_sort([0, 1, -10, 15, 2, -2]) == [-10, -2, 0, 1, 2, 15]
| 412
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_electra": ["ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "ElectraConfig", "ElectraOnnxConfig"],
"tokenization_electra": ["ElectraTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a = ["ElectraTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_electra"] = [
"ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST",
"ElectraForCausalLM",
"ElectraForMaskedLM",
"ElectraForMultipleChoice",
"ElectraForPreTraining",
"ElectraForQuestionAnswering",
"ElectraForSequenceClassification",
"ElectraForTokenClassification",
"ElectraModel",
"ElectraPreTrainedModel",
"load_tf_weights_in_electra",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_electra"] = [
"TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFElectraForMaskedLM",
"TFElectraForMultipleChoice",
"TFElectraForPreTraining",
"TFElectraForQuestionAnswering",
"TFElectraForSequenceClassification",
"TFElectraForTokenClassification",
"TFElectraModel",
"TFElectraPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_electra"] = [
"FlaxElectraForCausalLM",
"FlaxElectraForMaskedLM",
"FlaxElectraForMultipleChoice",
"FlaxElectraForPreTraining",
"FlaxElectraForQuestionAnswering",
"FlaxElectraForSequenceClassification",
"FlaxElectraForTokenClassification",
"FlaxElectraModel",
"FlaxElectraPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
from .tokenization_electra import ElectraTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_electra_fast import ElectraTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_electra import (
ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
ElectraForCausalLM,
ElectraForMaskedLM,
ElectraForMultipleChoice,
ElectraForPreTraining,
ElectraForQuestionAnswering,
ElectraForSequenceClassification,
ElectraForTokenClassification,
ElectraModel,
ElectraPreTrainedModel,
load_tf_weights_in_electra,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_electra import (
TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFElectraForMaskedLM,
TFElectraForMultipleChoice,
TFElectraForPreTraining,
TFElectraForQuestionAnswering,
TFElectraForSequenceClassification,
TFElectraForTokenClassification,
TFElectraModel,
TFElectraPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_electra import (
FlaxElectraForCausalLM,
FlaxElectraForMaskedLM,
FlaxElectraForMultipleChoice,
FlaxElectraForPreTraining,
FlaxElectraForQuestionAnswering,
FlaxElectraForSequenceClassification,
FlaxElectraForTokenClassification,
FlaxElectraModel,
FlaxElectraPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 13
|
'''simple docstring'''
def is_num_palindrome(num: int) -> bool:
    '''
    >>> is_num_palindrome(121)
    True
    >>> is_num_palindrome(123)
    False
    '''
    if num < 0:
        return False
    num_copy = num
    rev_num = 0
    while num > 0:
        rev_num = rev_num * 10 + (num % 10)
        num //= 10
    return num_copy == rev_num
if __name__ == "__main__":
import doctest
doctest.testmod()
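    # Hedged spot-checks (they assume the fixed name ``is_num_palindrome`` above):
    assert is_num_palindrome(121)
    assert not is_num_palindrome(-121)
    assert not is_num_palindrome(123)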
| 13
| 1
|
'''simple docstring'''
from __future__ import annotations
from math import gcd
def pollard_rho(num: int, seed: int = 2, step: int = 1, attempts: int = 3) -> int | None:
'''simple docstring'''
if num < 2:
raise ValueError("""The input value cannot be less than 2""" )
# Because of the relationship between ``f(f(x))`` and ``f(x)``, this
# algorithm struggles to find factors that are divisible by two.
# As a workaround, we specifically check for two and even inputs.
# See: https://math.stackexchange.com/a/2856214/165820
if num > 2 and num % 2 == 0:
return 2
# Pollard's Rho algorithm requires a function that returns pseudorandom
# values between 0 <= X < ``num``. It doesn't need to be random in the
# sense that the output value is cryptographically secure or difficult
# to calculate, it only needs to be random in the sense that all output
# values should be equally likely to appear.
# For this reason, Pollard suggested using ``f(x) = (x**2 - 1) % num``
# However, the success of Pollard's algorithm isn't guaranteed and is
# determined in part by the initial seed and the chosen random function.
# To make retries easier, we will instead use ``f(x) = (x**2 + C) % num``
# where ``C`` is a value that we can modify between each attempt.
    def rand_fn(value: int, step: int, modulus: int) -> int:
        return (pow(value, 2) + step) % modulus
    for _ in range(attempts):
# These track the position within the cycle detection logic.
        tortoise = seed
        hare = seed
while True:
# At each iteration, the tortoise moves one step and the hare moves two.
            tortoise = rand_fn(tortoise, step, num)
            hare = rand_fn(hare, step, num)
            hare = rand_fn(hare, step, num)
# At some point both the tortoise and the hare will enter a cycle whose
# length ``p`` is a divisor of ``num``. Once in that cycle, at some point
# the tortoise and hare will end up on the same value modulo ``p``.
# We can detect when this happens because the position difference between
# the tortoise and the hare will share a common divisor with ``num``.
            divisor = gcd(hare - tortoise, num)
if divisor == 1:
# No common divisor yet, just keep searching.
continue
else:
# We found a common divisor!
if divisor == num:
# Unfortunately, the divisor is ``num`` itself and is useless.
break
else:
# The divisor is a nontrivial factor of ``num``!
return divisor
# If we made it here, then this attempt failed.
# We need to pick a new starting seed for the tortoise and hare
# in addition to a new step value for the random function.
# To keep this example implementation deterministic, the
# new values will be generated based on currently available
# values instead of using something like ``random.randint``.
# We can use the hare's position as the new seed.
        # This is actually what Richard Brent's "optimized" variant does.
        seed = hare
# The new step value for the random function can just be incremented.
# At first the results will be similar to what the old function would
# have produced, but the value will quickly diverge after a bit.
step += 1
# We haven't found a divisor within the requested number of attempts.
# We were unlucky or ``num`` itself is actually prime.
return None
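# A minimal hedged example (kept as a comment; it assumes the fixed name
# ``pollard_rho`` above). Any non-None return value must be a nontrivial factor:
#
#     factor = pollard_rho(8051)           # 8051 == 83 * 97
#     assert factor is None or 8051 % factor == 0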
if __name__ == "__main__":
import argparse
    parser = argparse.ArgumentParser()
parser.add_argument(
"num",
type=int,
help="The value to find a divisor of",
)
parser.add_argument(
"--attempts",
type=int,
default=3,
help="The number of attempts before giving up",
)
    args = parser.parse_args()
    divisor = pollard_rho(args.num, attempts=args.attempts)
if divisor is None:
print(F'''{args.num} is probably prime''')
else:
        quotient = args.num // divisor
print(F'''{args.num} = {divisor} * {quotient}''')
| 109
|
"""simple docstring"""
import argparse
from omegaconf import OmegaConf
import torch
from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel
def convert_ldm_original(checkpoint_path: str, config_path: str, output_path: str):
    config = OmegaConf.load(config_path)
    state_dict = torch.load(checkpoint_path, map_location='''cpu''')['''model''']
    keys = list(state_dict.keys())

    # extract state_dict for VQVAE
    first_stage_dict = {}
    first_stage_key = '''first_stage_model.'''
    for key in keys:
        if key.startswith(first_stage_key):
            first_stage_dict[key.replace(first_stage_key, '''''')] = state_dict[key]

    # extract state_dict for UNetLDM
    unet_state_dict = {}
    unet_key = '''model.diffusion_model.'''
    for key in keys:
        if key.startswith(unet_key):
            unet_state_dict[key.replace(unet_key, '''''')] = state_dict[key]

    vqvae_init_args = config.model.params.first_stage_config.params
    unet_init_args = config.model.params.unet_config.params

    vqvae = VQModel(**vqvae_init_args).eval()
    vqvae.load_state_dict(first_stage_dict)

    unet = UNetLDMModel(**unet_init_args).eval()
    unet.load_state_dict(unet_state_dict)

    scheduler = DDIMScheduler(
        timesteps=config.model.params.timesteps,
        beta_schedule='''scaled_linear''',
        beta_start=config.model.params.linear_start,
        beta_end=config.model.params.linear_end,
        clip_sample=False,
    )

    pipeline = LDMPipeline(vqvae, unet, scheduler)
    pipeline.save_pretrained(output_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--checkpoint_path''', type=str, required=True)
parser.add_argument('''--config_path''', type=str, required=True)
parser.add_argument('''--output_path''', type=str, required=True)
    args = parser.parse_args()
convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
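# Example invocation (hypothetical paths, for illustration only):
#
#   python conversion_ldm_uncond.py --checkpoint_path ldm.ckpt \
#       --config_path ldm_config.yaml --output_path ./ldm_pipeline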
| 231
| 0
|
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import DDIMScheduler, KandinskyV22Pipeline, KandinskyV22PriorPipeline, UNet2DConditionModel, VQModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyV22PipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22Pipeline
    params = [
        "image_embeds",
        "negative_image_embeds",
    ]
    batch_params = ["image_embeds", "negative_image_embeds"]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        """simple docstring"""
        return 32

    @property
    def time_input_dim(self):
        """simple docstring"""
        return 32

    @property
    def block_out_channels_(self):
        """simple docstring"""
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        """simple docstring"""
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        """simple docstring"""
        return 100
    @property
    def dummy_unet(self):
        """simple docstring"""
        torch.manual_seed(0)

        model_kwargs = {
            '''in_channels''': 4,
            # Out channels is double in channels because predicts mean and variance
            '''out_channels''': 8,
            '''addition_embed_type''': '''image''',
            '''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
            '''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
            '''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
            '''block_out_channels''': (self.block_out_channels_, self.block_out_channels_ * 2),
            '''layers_per_block''': 1,
            '''encoder_hid_dim''': self.text_embedder_hidden_size,
            '''encoder_hid_dim_type''': '''image_proj''',
            '''cross_attention_dim''': self.cross_attention_dim,
            '''attention_head_dim''': 4,
            '''resnet_time_scale_shift''': '''scale_shift''',
            '''class_embed_type''': None,
        }
        model = UNet2DConditionModel(**model_kwargs)
        return model
@property
    def dummy_movq_kwargs(self):
"""simple docstring"""
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
    def dummy_movq(self):
        """simple docstring"""
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components(self):
        """simple docstring"""
        unet = self.dummy_unet
        movq = self.dummy_movq

        scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_schedule='''linear''',
            beta_start=0.00085,
            beta_end=0.012,
            clip_sample=False,
            set_alpha_to_one=False,
            steps_offset=1,
            prediction_type='''epsilon''',
            thresholding=False,
        )

        components = {
            '''unet''': unet,
            '''scheduler''': scheduler,
            '''movq''': movq,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        """simple docstring"""
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )
        if str(device).startswith('''mps'''):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            '''image_embeds''': image_embeds,
            '''negative_image_embeds''': negative_image_embeds,
            '''generator''': generator,
            '''height''': 64,
            '''width''': 64,
            '''guidance_scale''': 4.0,
            '''num_inference_steps''': 2,
            '''output_type''': '''np''',
        }
        return inputs
    def test_kandinsky(self):
        """simple docstring"""
        device = '''cpu'''

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.6237976, 1.0, 0.36441332, 1.0, 0.70639634, 0.29877186, 0.85652125, 0.5216843, 0.54454046]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class KandinskyV22PipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        """simple docstring"""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_text2img(self):
        """simple docstring"""
        expected_image = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/kandinskyv22/kandinskyv22_text2img_cat_fp16.npy'''
        )

        pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
            '''kandinsky-community/kandinsky-2-2-prior''', torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyV22Pipeline.from_pretrained(
            '''kandinsky-community/kandinsky-2-2-decoder''', torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        prompt = '''red cat, 4k photo'''

        generator = torch.Generator(device='''cuda''').manual_seed(0)
        image_embeds, zero_image_embeds = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=5,
            negative_prompt='''''',
        ).to_tuple()

        generator = torch.Generator(device='''cuda''').manual_seed(0)
        output = pipeline(
            image_embeds=image_embeds,
            negative_image_embeds=zero_image_embeds,
            generator=generator,
            num_inference_steps=100,
            output_type='''np''',
        )

        image = output.images[0]

        assert image.shape == (512, 512, 3)

        assert_mean_pixel_difference(image, expected_image)
| 706
|
import unittest
import numpy as np
import requests
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
    is_torch_greater_or_equal_than_1_11 = False
if is_vision_available():
from PIL import Image
    from transformers import Pix2StructImageProcessor
class Pix2StructImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        size=None,
        do_normalize=True,
        do_convert_rgb=True,
        patch_size=None,
    ):
        """simple docstring"""
        size = size if size is not None else {'''height''': 20, '''width''': 20}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.size = size
        self.do_normalize = do_normalize
        self.do_convert_rgb = do_convert_rgb
        self.max_patches = [512, 1024, 2048, 4096]
        self.patch_size = patch_size if patch_size is not None else {'''height''': 16, '''width''': 16}
    def prepare_image_processor_dict(self):
        """simple docstring"""
        return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb}

    def prepare_dummy_image(self):
        """simple docstring"""
        img_url = '''https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg'''
        raw_image = Image.open(requests.get(img_url, stream=True).raw).convert('''RGB''')
        return raw_image
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason="`Pix2StructImageProcessor` requires `torch>=1.11.0`." , )
@require_torch
@require_vision
class Pix2StructImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = Pix2StructImageProcessor if is_vision_available() else None

    def setUp(self):
        """simple docstring"""
        self.image_processor_tester = Pix2StructImageProcessingTester(self)
    @property
    def image_processor_dict(self):
        """simple docstring"""
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        """simple docstring"""
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, '''do_normalize'''))
        self.assertTrue(hasattr(image_processor, '''do_convert_rgb'''))
    def test_expected_patches(self):
        """simple docstring"""
        dummy_image = self.image_processor_tester.prepare_dummy_image()

        image_processor = self.image_processing_class(**self.image_processor_dict)
        max_patch = 2048

        inputs = image_processor(dummy_image, return_tensors='''pt''', max_patches=max_patch)
        self.assertTrue(torch.allclose(inputs.flattened_patches.mean(), torch.tensor(0.0606), atol=1e-3, rtol=1e-3))
    def test_call_pil(self):
        """simple docstring"""
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
            * self.image_processor_tester.num_channels
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors='''pt''', max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (1, max_patch, expected_hidden_dim),
            )

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors='''pt''', max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )
    def test_call_vqa(self):
        """simple docstring"""
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
            * self.image_processor_tester.num_channels
        ) + 2

        image_processor.is_vqa = True
        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            with self.assertRaises(ValueError):
                encoded_images = image_processor(
                    image_inputs[0], return_tensors='''pt''', max_patches=max_patch
                ).flattened_patches

            dummy_text = '''Hello'''

            encoded_images = image_processor(
                image_inputs[0], return_tensors='''pt''', max_patches=max_patch, header_text=dummy_text
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (1, max_patch, expected_hidden_dim),
            )

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors='''pt''', max_patches=max_patch, header_text=dummy_text
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )
    def test_call_numpy(self):
        """simple docstring"""
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        expected_hidden_dim = (
            (self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
            * self.image_processor_tester.num_channels
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors='''pt''', max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (1, max_patch, expected_hidden_dim),
            )

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors='''pt''', max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )
    def test_call_pytorch(self):
        """simple docstring"""
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
            * self.image_processor_tester.num_channels
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors='''pt''', max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (1, max_patch, expected_hidden_dim),
            )

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors='''pt''', max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )
@unittest.skipIf(
    not is_torch_greater_or_equal_than_1_11,
    reason="`Pix2StructImageProcessor` requires `torch>=1.11.0`.",
)
@require_torch
@require_vision
class Pix2StructImageProcessingTestFourChannels(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = Pix2StructImageProcessor if is_vision_available() else None

    def setUp(self):
        """simple docstring"""
        self.image_processor_tester = Pix2StructImageProcessingTester(self, num_channels=4)
        self.expected_encoded_image_num_channels = 3

    @property
    def image_processor_dict(self):
        """simple docstring"""
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        """simple docstring"""
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, '''do_normalize'''))
        self.assertTrue(hasattr(image_processor, '''do_convert_rgb'''))

    def test_call_pil_four_channels(self):
        """simple docstring"""
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
            * (self.image_processor_tester.num_channels - 1)
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors='''pt''', max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (1, max_patch, expected_hidden_dim),
            )

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors='''pt''', max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )
| 191
| 0
|
class Graph:
    def __init__(self):
        """simple docstring"""
        self.num_vertices = 0
        self.num_edges = 0
        self.adjacency = {}
    def add_vertex(self, vertex):
        """simple docstring"""
        if vertex not in self.adjacency:
            self.adjacency[vertex] = {}
            self.num_vertices += 1
    def add_edge(self, head, tail, weight):
        """simple docstring"""
        self.add_vertex(head)
        self.add_vertex(tail)
        if head == tail:
            return

        self.adjacency[head][tail] = weight
        self.adjacency[tail][head] = weight
    def distinct_weight(self):
        """simple docstring"""
        edges = self.get_edges()
        for edge in edges:
            head, tail, weight = edge
            edges.remove((tail, head, weight))
        for i in range(len(edges)):
            edges[i] = list(edges[i])

        edges.sort(key=lambda e: e[2])
        for i in range(len(edges) - 1):
            if edges[i][2] >= edges[i + 1][2]:
                edges[i + 1][2] = edges[i][2] + 1
        for edge in edges:
            head, tail, weight = edge
            self.adjacency[head][tail] = weight
            self.adjacency[tail][head] = weight
    def __str__(self):
        """simple docstring"""
        string = ""
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                weight = self.adjacency[head][tail]
                string += F'''{head} -> {tail} == {weight}\n'''
        return string.rstrip("\n")
    def get_edges(self):
        """simple docstring"""
        output = []
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                output.append((tail, head, self.adjacency[head][tail]))
        return output

    def get_vertices(self):
        """simple docstring"""
        return self.adjacency.keys()
    @staticmethod
    def build(vertices=None, edges=None):
        """simple docstring"""
        g = Graph()
        if vertices is None:
            vertices = []
        if edges is None:
            edges = []
        for vertex in vertices:
            g.add_vertex(vertex)
        for edge in edges:
            g.add_edge(*edge)
        return g
    class UnionFind(object):
        def __init__(self):
            """simple docstring"""
            self.parent = {}
            self.rank = {}

        def __len__(self):
            """simple docstring"""
            return len(self.parent)

        def make_set(self, item):
            """simple docstring"""
            if item in self.parent:
                return self.find(item)

            self.parent[item] = item
            self.rank[item] = 0
            return item

        def find(self, item):
            """simple docstring"""
            if item not in self.parent:
                return self.make_set(item)
            if item != self.parent[item]:
                self.parent[item] = self.find(self.parent[item])
            return self.parent[item]

        def union(self, item1, item2):
            """simple docstring"""
            root1 = self.find(item1)
            root2 = self.find(item2)

            if root1 == root2:
                return root1

            if self.rank[root1] > self.rank[root2]:
                self.parent[root2] = root1
                return root1

            if self.rank[root1] < self.rank[root2]:
                self.parent[root1] = root2
                return root2

            if self.rank[root1] == self.rank[root2]:
                self.rank[root1] += 1
                self.parent[root2] = root1
                return root1
            return None
    @staticmethod
    def boruvka_mst(graph):
        """simple docstring"""
        num_components = graph.num_vertices

        union_find = Graph.UnionFind()
        mst_edges = []
        while num_components > 1:
            cheap_edge = {}
            for vertex in graph.get_vertices():
                cheap_edge[vertex] = -1

            edges = graph.get_edges()
            for edge in edges:
                head, tail, weight = edge
                edges.remove((tail, head, weight))
            for edge in edges:
                head, tail, weight = edge
                set1 = union_find.find(head)
                set2 = union_find.find(tail)
                if set1 != set2:
                    if cheap_edge[set1] == -1 or cheap_edge[set1][2] > weight:
                        cheap_edge[set1] = [head, tail, weight]

                    if cheap_edge[set2] == -1 or cheap_edge[set2][2] > weight:
                        cheap_edge[set2] = [head, tail, weight]
            for vertex in cheap_edge:
                if cheap_edge[vertex] != -1:
                    head, tail, weight = cheap_edge[vertex]
                    if union_find.find(head) != union_find.find(tail):
                        union_find.union(head, tail)

                        mst_edges.append(cheap_edge[vertex])
                        num_components = num_components - 1
        mst = Graph.build(edges=mst_edges)
        return mst
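# A small hedged usage sketch (kept as a comment; it assumes the fixed names
# ``Graph.build`` and ``Graph.boruvka_mst`` above):
#
#     g = Graph.build(vertices=[0, 1, 2, 3], edges=[(0, 1, 1), (0, 2, 2), (2, 3, 3)])
#     print(Graph.boruvka_mst(g))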
| 395
|
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
logger = logging.getLogger()
@unittest.skip('Temporarily disable the doc tests.')
@require_torch
@require_tf
@slow
class TestCodeExamples(unittest.TestCase):
    def analyze_directory(
        self,
        directory: Path,
        identifier: Union[str, None] = None,
        ignore_files: Union[List[str], None] = None,
        n_identifier: Union[str, List[str], None] = None,
        only_modules: bool = True,
    ):
        """simple docstring"""
        files = [file for file in os.listdir(directory) if os.path.isfile(os.path.join(directory, file))]

        if identifier is not None:
            files = [file for file in files if identifier in file]

        if n_identifier is not None:
            if isinstance(n_identifier, list):
                for n_ in n_identifier:
                    files = [file for file in files if n_ not in file]
            else:
                files = [file for file in files if n_identifier not in file]

        ignore_files = ignore_files or []
        ignore_files.append("__init__.py")
        files = [file for file in files if file not in ignore_files]

        for file in files:
            # Open all files
            print("Testing", file)

            if only_modules:
                module_identifier = file.split(".")[0]
                try:
                    module_identifier = getattr(transformers, module_identifier)
                    suite = doctest.DocTestSuite(module_identifier)
                    result = unittest.TextTestRunner().run(suite)
                    self.assertIs(len(result.failures), 0)
                except AttributeError:
                    logger.info(F'''{module_identifier} is not a module.''')
            else:
                result = doctest.testfile(str(Path("..") / directory / file), optionflags=doctest.ELLIPSIS)
                self.assertIs(result.failed, 0)
    def test_modeling_files_are_tested(self):
        """simple docstring"""
        directory = Path("src/transformers")
        identifier = "modeling"
        ignore_files = [
            "modeling_ctrl.py",
            "modeling_tf_ctrl.py",
        ]
        self.analyze_directory(directory, identifier=identifier, ignore_files=ignore_files)

    def test_tokenization_files_are_tested(self):
        """simple docstring"""
        directory = Path("src/transformers")
        identifier = "tokenization"
        self.analyze_directory(directory, identifier=identifier)

    def test_configuration_files_are_tested(self):
        """simple docstring"""
        directory = Path("src/transformers")
        identifier = "configuration"
        self.analyze_directory(directory, identifier=identifier)

    def test_remaining_files_are_tested(self):
        """simple docstring"""
        directory = Path("src/transformers")
        n_identifiers = ["configuration", "modeling", "tokenization"]
        self.analyze_directory(directory, n_identifier=n_identifiers)

    def test_doc_files_are_tested(self):
        """simple docstring"""
        directory = Path("docs/source")
        ignore_files = ["favicon.ico"]
        self.analyze_directory(directory, ignore_files=ignore_files, only_modules=False)
| 395
| 1
|
"""simple docstring"""
import argparse
import torch
from transformers import FunnelBaseModel, FunnelConfig, FunnelModel, load_tf_weights_in_funnel
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path, base_model):
    '''simple docstring'''
    # Initialise PyTorch model
    config = FunnelConfig.from_json_file(config_file)
    print(F"""Building PyTorch model from configuration: {config}""")
    model = FunnelBaseModel(config) if base_model else FunnelModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_funnel(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(F"""Save PyTorch model to {pytorch_dump_path}""")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--base_model''', action='''store_true''', help='''Whether you want just the base model (no decoder) or not.'''
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path, args.base_model
)
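# Example invocation (hypothetical paths, for illustration only):
#
#   python convert_funnel_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path model.ckpt --config_file config.json \
#       --pytorch_dump_path pytorch_model.bin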
| 386
|
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class EfficientFormerImageProcessorTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        image_size=224,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        '''simple docstring'''
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
    def prepare_image_processor_dict(self):
        '''simple docstring'''
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
        }
@require_torch
@require_vision
class EfficientFormerImageProcessorTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ViTImageProcessor if is_vision_available() else None

    def setUp(self):
        '''simple docstring'''
        self.image_proc_tester = EfficientFormerImageProcessorTester(self)

    @property
    def image_processor_dict(self):
        '''simple docstring'''
        return self.image_proc_tester.prepare_image_processor_dict()
    def test_image_proc_properties(self):
        '''simple docstring'''
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "image_mean"))
        self.assertTrue(hasattr(image_processor, "image_std"))
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_resize"))
        self.assertTrue(hasattr(image_processor, "size"))

    def test_batch_feature(self):
        '''simple docstring'''
        pass
    def test_call_pil(self):
        '''simple docstring'''
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )
    def test_call_numpy(self):
        '''simple docstring'''
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )
    def test_call_pytorch(self):
        '''simple docstring'''
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )
| 386
| 1
|
import os
import textwrap
import pyarrow as pa
import pytest
from datasets import ClassLabel, Features, Image
from datasets.packaged_modules.csv.csv import Csv
from ..utils import require_pil
@pytest.fixture
def csv_file(tmp_path):
    '''simple docstring'''
    filename = tmp_path / '''file.csv'''
    data = textwrap.dedent(
        '''\
        header1,header2
        1,2
        10,20
        ''' )
    with open(filename, '''w''' ) as f:
        f.write(data)
    return str(filename)
@pytest.fixture
def malformed_csv_file(tmp_path):
    '''simple docstring'''
    filename = tmp_path / '''malformed_file.csv'''
    data = textwrap.dedent(
        '''\
        header1,header2
        1,2
        10,20,
        ''' )
    with open(filename, '''w''' ) as f:
        f.write(data)
    return str(filename)
@pytest.fixture
def csv_file_with_image(tmp_path, image_file):
    '''simple docstring'''
    filename = tmp_path / '''csv_with_image.csv'''
    data = textwrap.dedent(
        f"""\
        image
        {image_file}
        """ )
    with open(filename, '''w''' ) as f:
        f.write(data)
    return str(filename)
@pytest.fixture
def csv_file_with_label(tmp_path):
    '''simple docstring'''
    filename = tmp_path / '''csv_with_label.csv'''
    data = textwrap.dedent(
        '''\
        label
        good
        bad
        good
        ''' )
    with open(filename, '''w''' ) as f:
        f.write(data)
    return str(filename)
@pytest.fixture
def csv_file_with_int_list(tmp_path):
    '''simple docstring'''
    filename = tmp_path / '''csv_with_int_list.csv'''
    data = textwrap.dedent(
        '''\
        int_list
        1 2 3
        4 5 6
        7 8 9
        ''' )
    with open(filename, '''w''' ) as f:
        f.write(data)
    return str(filename)
def test_csv_generate_tables_raises_error_with_malformed_csv(csv_file, malformed_csv_file, caplog):
    '''simple docstring'''
    csv = Csv()
    generator = csv._generate_tables([[csv_file, malformed_csv_file]])
    with pytest.raises(ValueError, match='''Error tokenizing data''' ):
        for _ in generator:
            pass
    assert any(
        record.levelname == '''ERROR'''
        and '''Failed to read file''' in record.message
        and os.path.basename(malformed_csv_file) in record.message
        for record in caplog.records )
@require_pil
def test_csv_cast_image(csv_file_with_image):
    '''simple docstring'''
    with open(csv_file_with_image, encoding='''utf-8''' ) as f:
        image_file = f.read().splitlines()[1]
    csv = Csv(encoding='''utf-8''', features=Features({'''image''': Image()} ) )
    generator = csv._generate_tables([[csv_file_with_image]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa_table.schema.field('''image''' ).type == Image()()
    generated_content = pa_table.to_pydict()['''image''']
    assert generated_content == [{"path": image_file, "bytes": None}]
def test_csv_cast_label(csv_file_with_label):
    '''simple docstring'''
    with open(csv_file_with_label, encoding='''utf-8''' ) as f:
        labels = f.read().splitlines()[1:]
    csv = Csv(encoding='''utf-8''', features=Features({'''label''': ClassLabel(names=['''good''', '''bad'''] )} ) )
    generator = csv._generate_tables([[csv_file_with_label]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa_table.schema.field('''label''' ).type == ClassLabel(names=['''good''', '''bad'''] )()
    generated_content = pa_table.to_pydict()['''label''']
    assert generated_content == [ClassLabel(names=['''good''', '''bad'''] ).str2int(label) for label in labels]
def test_csv_convert_int_list(csv_file_with_int_list):
    '''simple docstring'''
    csv = Csv(encoding='''utf-8''', sep=''',''', converters={'''int_list''': lambda x: [int(i) for i in x.split()]} )
    generator = csv._generate_tables([[csv_file_with_int_list]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa.types.is_list(pa_table.schema.field('''int_list''' ).type )
    generated_content = pa_table.to_pydict()['''int_list''']
    assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
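# Hedged note: these tests rely on pytest's built-in ``tmp_path`` fixture and an
# ``image_file`` fixture defined elsewhere in the suite; run them with e.g.
#
#   pytest -q tests/packaged_modules/test_csv.py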
| 167
|
from __future__ import annotations
import numpy as np
def lower_upper_decomposition(table: np.ndarray) -> tuple[np.ndarray, np.ndarray]:
    '''
    Doolittle decomposition: factor a square matrix into a unit lower
    triangular matrix and an upper triangular matrix.

    >>> lower, upper = lower_upper_decomposition(np.array([[2, -2, 1], [0, 1, 2], [5, 3, 1]]))
    >>> bool(np.allclose(lower @ upper, np.array([[2, -2, 1], [0, 1, 2], [5, 3, 1]])))
    True
    '''
    rows, columns = np.shape(table)
    if rows != columns:
        msg = (
            '''\'table\' has to be of square shaped array but got a '''
            f"""{rows}x{columns} array:\n{table}"""
        )
        raise ValueError(msg)

    lower = np.zeros((rows, columns))
    upper = np.zeros((rows, columns))
    for i in range(columns):
        for j in range(i):
            total = sum(lower[i][k] * upper[k][j] for k in range(j))
            if upper[j][j] == 0:
                raise ArithmeticError('''No LU decomposition exists''')
            lower[i][j] = (table[i][j] - total) / upper[j][j]
        lower[i][i] = 1
        for j in range(i, columns):
            total = sum(lower[i][k] * upper[k][j] for k in range(i))
            upper[i][j] = table[i][j] - total
    return lower, upper
if __name__ == "__main__":
import doctest
doctest.testmod()
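    # A hedged worked example (runs alongside the doctests above): the two
    # factors must reproduce the input matrix.
    matrix = np.array([[2.0, -2.0, 1.0], [0.0, 1.0, 2.0], [5.0, 3.0, 1.0]])
    lower, upper = lower_upper_decomposition(matrix)
    assert np.allclose(lower @ upper, matrix)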
| 167
| 1
|
"""simple docstring"""
from __future__ import annotations
solution = []
def is_safe(board: list[list[int]], row: int, column: int) -> bool:
    for i in range(len(board)):
        if board[row][i] == 1:
            return False
    for i in range(len(board)):
        if board[i][column] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, -1, -1)):
        if board[i][j] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, len(board))):
        if board[i][j] == 1:
            return False
    return True
def solve(board: list[list[int]], row: int) -> bool:
    if row >= len(board):
        solution.append(board)
        printboard(board)
        print()
        return True
    for i in range(len(board)):
        if is_safe(board, row, i):
            board[row][i] = 1
            solve(board, row + 1)
            board[row][i] = 0
    return False
def printboard(board: list[list[int]]) -> None:
    for i in range(len(board)):
        for j in range(len(board)):
            if board[i][j] == 1:
                print("Q", end=" ")
            else:
                print(".", end=" ")
        print()
# n=int(input("The no. of queens"))
n = 8
board = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print('The total no. of solutions are :', len(solution))
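# Hedged sanity check: the 8-queens puzzle has exactly 92 distinct solutions,
# and the backtracking above enumerates all of them.
assert len(solution) == 92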
| 112
|
"""simple docstring"""
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_autoformer': [
'AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'AutoformerConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_autoformer"] = [
'AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'AutoformerForPrediction',
'AutoformerModel',
'AutoformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_autoformer import (
AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_autoformer import (
AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
AutoformerForPrediction,
AutoformerModel,
AutoformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 112
| 1
|
import argparse
import torch
from safetensors.torch import load_file
from diffusers import StableDiffusionPipeline
def convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha):
    '''simple docstring'''
    # load base model
    pipeline = StableDiffusionPipeline.from_pretrained(base_model_path, torch_dtype=torch.float32)

    # load LoRA weight from .safetensors
    state_dict = load_file(checkpoint_path)

    visited = []

    # directly update weight in diffusers model
    for key in state_dict:
        # it is suggested to print out the key, it usually will be something like below
        # "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight"

        # as we have set the alpha beforehand, so just skip
        if ".alpha" in key or key in visited:
            continue

        if "text" in key:
            layer_infos = key.split('''.''' )[0].split(lora_prefix_text_encoder + '''_''' )[-1].split('''_''' )
            curr_layer = pipeline.text_encoder
        else:
            layer_infos = key.split('''.''' )[0].split(lora_prefix_unet + '''_''' )[-1].split('''_''' )
            curr_layer = pipeline.unet

        # find the target layer
        temp_name = layer_infos.pop(0)
        while len(layer_infos) > -1:
            try:
                curr_layer = curr_layer.__getattr__(temp_name)
                if len(layer_infos) > 0:
                    temp_name = layer_infos.pop(0)
                elif len(layer_infos) == 0:
                    break
            except Exception:
                if len(temp_name) > 0:
                    temp_name += "_" + layer_infos.pop(0)
                else:
                    temp_name = layer_infos.pop(0)

        pair_keys = []
        if "lora_down" in key:
            pair_keys.append(key.replace('''lora_down''', '''lora_up''' ) )
            pair_keys.append(key)
        else:
            pair_keys.append(key)
            pair_keys.append(key.replace('''lora_up''', '''lora_down''' ) )

        # update weight
        if len(state_dict[pair_keys[0]].shape) == 4:
            weight_up = state_dict[pair_keys[0]].squeeze(3).squeeze(2).to(torch.float32)
            weight_down = state_dict[pair_keys[1]].squeeze(3).squeeze(2).to(torch.float32)
            curr_layer.weight.data += alpha * torch.mm(weight_up, weight_down).unsqueeze(2).unsqueeze(3)
        else:
            weight_up = state_dict[pair_keys[0]].to(torch.float32)
            weight_down = state_dict[pair_keys[1]].to(torch.float32)
            curr_layer.weight.data += alpha * torch.mm(weight_up, weight_down)

        # update visited list
        for item in pair_keys:
            visited.append(item)

    return pipeline
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--base_model_path', default=None, type=str, required=True, help='Path to the base model in diffusers format.'
)
parser.add_argument(
'--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.'
)
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
parser.add_argument(
'--lora_prefix_unet', default='lora_unet', type=str, help='The prefix of UNet weight in safetensors'
)
parser.add_argument(
'--lora_prefix_text_encoder',
default='lora_te',
type=str,
help='The prefix of text encoder weight in safetensors',
)
parser.add_argument('--alpha', default=0.75, type=float, help='The merging ratio in W = W0 + alpha * deltaW')
parser.add_argument(
'--to_safetensors', action='store_true', help='Whether to store pipeline in safetensors format or not.'
)
parser.add_argument('--device', type=str, help='Device to use (e.g. cpu, cuda:0, cuda:1, etc.)')
    args = parser.parse_args()

    base_model_path = args.base_model_path
    checkpoint_path = args.checkpoint_path
    dump_path = args.dump_path
    lora_prefix_unet = args.lora_prefix_unet
    lora_prefix_text_encoder = args.lora_prefix_text_encoder
    alpha = args.alpha

    pipe = convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha)
    pipe = pipe.to(args.device)
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
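# Example invocation (hypothetical paths, for illustration only):
#
#   python convert_lora_safetensor_to_diffusers.py \
#       --base_model_path runwayml/stable-diffusion-v1-5 \
#       --checkpoint_path lora.safetensors --dump_path ./merged --alpha 0.75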
| 30
|
"""simple docstring"""
import copy
from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig
if TYPE_CHECKING:
from ... import PreTrainedTokenizerBase, TensorType
logger = logging.get_logger(__name__)
class VisionEncoderDecoderConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "vision-encoder-decoder"
    is_composition = True
    def __init__(self, **kwargs):
        """simple docstring"""
        super().__init__(**kwargs)
        if "encoder" not in kwargs or "decoder" not in kwargs:
            raise ValueError(
                f"""A configuration of type {self.model_type} cannot be instantiated because """
                f"""not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}""")

        encoder_config = kwargs.pop("""encoder""")
        encoder_model_type = encoder_config.pop("""model_type""")
        decoder_config = kwargs.pop("""decoder""")
        decoder_model_type = decoder_config.pop("""model_type""")

        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True
    @classmethod
    def from_encoder_decoder_configs(
        cls, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs
    ) -> PretrainedConfig:
        """simple docstring"""
        logger.info("""Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config""")
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True

        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)
    def to_dict(self):
        """simple docstring"""
        output = copy.deepcopy(self.__dict__)
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
class VisionEncoderDecoderEncoderOnnxConfig(OnnxConfig):
    """simple docstring"""

    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """simple docstring"""
        return OrderedDict(
            [
                ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
            ])

    @property
    def atol_for_validation(self) -> float:
        """simple docstring"""
        return 1e-4

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        """simple docstring"""
        return OrderedDict({"""last_hidden_state""": {0: """batch""", 1: """encoder_sequence"""}})
class VisionEncoderDecoderDecoderOnnxConfig(OnnxConfig):
    """simple docstring"""

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """simple docstring"""
        common_inputs = OrderedDict()
        common_inputs["""input_ids"""] = {0: """batch""", 1: """past_decoder_sequence + sequence"""}
        common_inputs["""attention_mask"""] = {0: """batch""", 1: """past_decoder_sequence + sequence"""}
        common_inputs["""encoder_hidden_states"""] = {0: """batch""", 1: """encoder_sequence"""}
        return common_inputs

    def generate_dummy_inputs(
        self,
        tokenizer: "PreTrainedTokenizerBase",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
    ) -> Mapping[str, Any]:
        """simple docstring"""
        import torch

        common_inputs = OrderedDict()

        dummy_input = super().generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework)

        batch, encoder_sequence = dummy_input["""input_ids"""].shape
        encoder_hidden_states_shape = (batch, encoder_sequence, self._config.encoder_hidden_size)
        common_inputs["""input_ids"""] = dummy_input.pop("""input_ids""")
        common_inputs["""attention_mask"""] = dummy_input.pop("""attention_mask""")
        common_inputs["""encoder_hidden_states"""] = torch.zeros(encoder_hidden_states_shape)

        return common_inputs
class VisionEncoderDecoderOnnxConfig(OnnxConfig):
    """simple docstring"""

    @property
    def inputs(self) -> None:
        """simple docstring"""
        pass

    def get_encoder_config(self, encoder_config: PretrainedConfig) -> OnnxConfig:
        """simple docstring"""
        return VisionEncoderDecoderEncoderOnnxConfig(encoder_config)

    def get_decoder_config(
        self, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, feature: str = "default"
    ) -> OnnxConfig:
        """simple docstring"""
        decoder_config.encoder_hidden_size = encoder_config.hidden_size
        return VisionEncoderDecoderDecoderOnnxConfig(decoder_config, feature)
| 338
| 0
|
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
import torch
from ...utils import is_npu_available, is_xpu_available
from .config_args import ClusterConfig, default_json_config_file
from .config_utils import SubcommandHelpFormatter
description = '''Create a default config file for Accelerate with only a few flags set.'''
def __snake_case ( _UpperCAmelCase="no" , _UpperCAmelCase = default_json_config_file , _UpperCAmelCase = False ):
"""simple docstring"""
lowercase = Path(_UpperCAmelCase )
path.parent.mkdir(parents=_UpperCAmelCase , exist_ok=_UpperCAmelCase )
if path.exists():
print(
f"""Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`.""" )
return False
lowercase = mixed_precision.lower()
if mixed_precision not in ["no", "fp16", "bf16", "fp8"]:
raise ValueError(
f"""`mixed_precision` should be one of 'no', 'fp16', 'bf16', or 'fp8'. Received {mixed_precision}""" )
lowercase = {
'compute_environment': 'LOCAL_MACHINE',
'mixed_precision': mixed_precision,
}
if torch.cuda.is_available():
lowercase = torch.cuda.device_count()
lowercase = num_gpus
lowercase = False
if num_gpus > 1:
lowercase = 'MULTI_GPU'
else:
lowercase = 'NO'
elif is_xpu_available() and use_xpu:
lowercase = torch.xpu.device_count()
lowercase = num_xpus
lowercase = False
if num_xpus > 1:
lowercase = 'MULTI_XPU'
else:
lowercase = 'NO'
elif is_npu_available():
lowercase = torch.npu.device_count()
lowercase = num_npus
lowercase = False
if num_npus > 1:
lowercase = 'MULTI_NPU'
else:
lowercase = 'NO'
else:
lowercase = 0
lowercase = True
lowercase = 1
lowercase = 'NO'
lowercase = ClusterConfig(**_UpperCAmelCase )
config.to_json_file(_UpperCAmelCase )
return path
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase ):
"""simple docstring"""
lowercase = parser.add_parser('default' , parents=_UpperCAmelCase , help=_UpperCAmelCase , formatter_class=_UpperCAmelCase )
parser.add_argument(
'--config_file' , default=_UpperCAmelCase , help=(
'The path to use to store the config file. Will default to a file named default_config.yaml in the cache '
'location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '
'such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '
'with \'huggingface\'.'
) , dest='save_location' , )
parser.add_argument(
'--mixed_precision' , choices=['no', 'fp16', 'bf16'] , type=_UpperCAmelCase , help='Whether or not to use mixed precision training. '
'Choose between FP16 and BF16 (bfloat16) training. '
'BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.' , default='no' , )
parser.set_defaults(func=_UpperCAmelCase )
return parser
def __snake_case ( _UpperCAmelCase ):
"""simple docstring"""
lowercase = write_basic_config(args.mixed_precision , args.save_location )
if config_file:
print(f"""accelerate configuration saved at {config_file}""" )
import argparse
import torch
from transformers import YosoConfig, YosoForMaskedLM
def rename_key(orig_key):
    """Map an original YOSO checkpoint key to the Transformers naming scheme."""
    if "model" in orig_key:
        orig_key = orig_key.replace("model.", "")
    if "norm1" in orig_key:
        orig_key = orig_key.replace("norm1", "attention.output.LayerNorm")
    if "norm2" in orig_key:
        orig_key = orig_key.replace("norm2", "output.LayerNorm")
    if "norm" in orig_key:
        orig_key = orig_key.replace("norm", "LayerNorm")
    if "transformer" in orig_key:
        layer_num = orig_key.split(".")[0].split("_")[-1]
        orig_key = orig_key.replace(f"transformer_{layer_num}", f"encoder.layer.{layer_num}")
    if "mha.attn" in orig_key:
        orig_key = orig_key.replace("mha.attn", "attention.self")
    if "mha" in orig_key:
        orig_key = orig_key.replace("mha", "attention")
    if "W_q" in orig_key:
        orig_key = orig_key.replace("W_q", "self.query")
    if "W_k" in orig_key:
        orig_key = orig_key.replace("W_k", "self.key")
    if "W_v" in orig_key:
        orig_key = orig_key.replace("W_v", "self.value")
    if "ff1" in orig_key:
        orig_key = orig_key.replace("ff1", "intermediate.dense")
    if "ff2" in orig_key:
        orig_key = orig_key.replace("ff2", "output.dense")
    if "ff" in orig_key:
        orig_key = orig_key.replace("ff", "output.dense")
    if "mlm_class" in orig_key:
        orig_key = orig_key.replace("mlm.mlm_class", "cls.predictions.decoder")
    if "mlm" in orig_key:
        orig_key = orig_key.replace("mlm", "cls.predictions.transform")
    if "cls" not in orig_key:
        orig_key = "yoso." + orig_key
    return orig_key


def convert_checkpoint_helper(max_position_embeddings, orig_state_dict):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if ("pooler" in key) or ("sen_class" in key):
            continue
        else:
            orig_state_dict[rename_key(key)] = val
    orig_state_dict["cls.predictions.bias"] = orig_state_dict["cls.predictions.decoder.bias"]
    orig_state_dict["yoso.embeddings.position_ids"] = torch.arange(max_position_embeddings).expand((1, -1)) + 2
    return orig_state_dict


def convert_yoso_checkpoint(checkpoint_path, yoso_config_file, pytorch_dump_path):
    orig_state_dict = torch.load(checkpoint_path, map_location="cpu")["model_state_dict"]
    config = YosoConfig.from_json_file(yoso_config_file)
    model = YosoForMaskedLM(config)
    new_state_dict = convert_checkpoint_helper(config.max_position_embeddings, orig_state_dict)
    print(model.load_state_dict(new_state_dict))
    model.eval()
    model.save_pretrained(pytorch_dump_path)
    print(f"Checkpoint successfully converted. Model saved at {pytorch_dump_path}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--pytorch_model_path''', default=None, type=str, required=True, help='''Path to YOSO pytorch checkpoint.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The json file for YOSO model config.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_yoso_checkpoint(args.pytorch_model_path, args.config_file, args.pytorch_dump_path)
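# Example invocation (the script name and file paths are placeholders):
#   python convert_yoso_checkpoint.py \
#       --pytorch_model_path ./yoso.ckpt \
#       --config_file ./yoso_config.json \
#       --pytorch_dump_path ./yoso-hf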
import csv
import tweepy
# Twitter API credentials
consumer_key = ""
consumer_secret = ""
access_key = ""
access_secret = ""


def get_all_tweets(screen_name: str) -> None:
    # authorize twitter, initialize tweepy
    auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_key, access_secret)
    api = tweepy.API(auth)
    # initialize a list to hold all the tweepy Tweets
    alltweets = []
    # make initial request for most recent tweets (200 is the maximum allowed count)
    new_tweets = api.user_timeline(screen_name=screen_name, count=200)
    # save most recent tweets
    alltweets.extend(new_tweets)
    # save the id of the oldest tweet less one
    oldest = alltweets[-1].id - 1
    # keep grabbing tweets until there are no tweets left to grab
    while len(new_tweets) > 0:
        print(f"getting tweets before {oldest}")
        # all subsequent requests use the max_id param to prevent duplicates
        new_tweets = api.user_timeline(screen_name=screen_name, count=200, max_id=oldest)
        # save most recent tweets
        alltweets.extend(new_tweets)
        # update the id of the oldest tweet less one
        oldest = alltweets[-1].id - 1
        print(f"...{len(alltweets)} tweets downloaded so far")
    # transform the tweepy tweets into a 2D array that will populate the csv
    outtweets = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]
    # write the csv
    with open(f"new_{screen_name}_tweets.csv", "w") as f:
        writer = csv.writer(f)
        writer.writerow(["id", "created_at", "text"])
        writer.writerows(outtweets)
if __name__ == "__main__":
# pass in the username of the account you want to download
get_all_tweets('''FirePing32''')
def abbr(a: str, b: str) -> bool:
    """
    Check whether string `a` can be turned into string `b` by capitalizing some of
    its lowercase letters and deleting all remaining lowercase letters.

    >>> abbr("daBcd", "ABC")
    True
    >>> abbr("dBcd", "ABC")
    False
    """
    n = len(a)
    m = len(b)
    dp = [[False for _ in range(m + 1)] for _ in range(n + 1)]
    dp[0][0] = True
    for i in range(n):
        for j in range(m + 1):
            if dp[i][j]:
                if j < m and a[i].upper() == b[j]:
                    dp[i + 1][j + 1] = True
                if a[i].islower():
                    dp[i + 1][j] = True
    return dp[n][m]
if __name__ == "__main__":
import doctest
doctest.testmod()
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/xglm-564M": "https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/xglm-564M": 2048,
}


class XGLMTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(self, vocab_file, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs):
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        # Compatibility with the original tokenizer
        self.num_madeup_words = 7
        madeup_words = [f"<madeupword{i}>" for i in range(self.num_madeup_words)]
        kwargs["additional_special_tokens"] = kwargs.get("additional_special_tokens", [])
        kwargs["additional_special_tokens"] += [
            word for word in madeup_words if word not in kwargs["additional_special_tokens"]
        ]
        super().__init__(
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs,
        )
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file
        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'
        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1
        # Mimic fairseq token-to-id alignment for the first 4 tokens
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
        sp_size = len(self.sp_model)
        madeup_words = {f"<madeupword{i}>": sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words)}
        self.fairseq_tokens_to_ids.update(madeup_words)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        if token_ids_1 is None:
            return [self.sep_token_id] + token_ids_0
        sep = [self.sep_token_id]
        return sep + token_ids_0 + sep + sep + token_ids_1

    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0))
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1))

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return len(sep + token_ids_0) * [0]
        return len(sep + token_ids_0 + sep + sep + token_ids_1) * [0]

    @property
    def vocab_size(self):
        return len(self.sp_model) + self.fairseq_offset + self.num_madeup_words

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
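# Minimal usage sketch (downloads the SentencePiece model from the map above):
#   tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")
#   ids = tokenizer("Hello world")["input_ids"]
#   print(tokenizer.convert_ids_to_tokens(ids))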
from typing import Optional
import numpy as np
import torch
from torch import nn
from transformers import GPT2Config, GPT2LMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class UniDiffuserTextDecoder(ModelMixin, ConfigMixin, ModuleUtilsMixin):
    _keys_to_ignore_on_load_unexpected = [r"h\.\d+\.attn\.bias", r"h\.\d+\.attn\.masked_bias"]

    @register_to_config
    def __init__(self, prefix_length: int, prefix_inner_dim: int, prefix_hidden_dim: Optional[int] = None, vocab_size: int = 50_257, n_positions: int = 1_024, n_embd: int = 768, n_layer: int = 12, n_head: int = 12, n_inner: Optional[int] = None, activation_function: str = "gelu_new", resid_pdrop: float = 0.1, embd_pdrop: float = 0.1, attn_pdrop: float = 0.1, layer_norm_epsilon: float = 1e-5, initializer_range: float = 0.02, scale_attn_weights: bool = True, use_cache: bool = True, scale_attn_by_inverse_layer_idx: bool = False, reorder_and_upcast_attn: bool = False):
        super().__init__()
        self.prefix_length = prefix_length
        if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
            raise ValueError(
                f"`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_inner_dim} and"
                f" `n_embd`: {n_embd} are not equal."
            )
        self.prefix_inner_dim = prefix_inner_dim
        self.prefix_hidden_dim = prefix_hidden_dim
        self.encode_prefix = (
            nn.Linear(self.prefix_inner_dim, self.prefix_hidden_dim)
            if self.prefix_hidden_dim is not None
            else nn.Identity()
        )
        self.decode_prefix = (
            nn.Linear(self.prefix_hidden_dim, n_embd) if self.prefix_hidden_dim is not None else nn.Identity()
        )
        gpt_config = GPT2Config(
            vocab_size=vocab_size, n_positions=n_positions, n_embd=n_embd, n_layer=n_layer, n_head=n_head, n_inner=n_inner, activation_function=activation_function, resid_pdrop=resid_pdrop, embd_pdrop=embd_pdrop, attn_pdrop=attn_pdrop, layer_norm_epsilon=layer_norm_epsilon, initializer_range=initializer_range, scale_attn_weights=scale_attn_weights, use_cache=use_cache, scale_attn_by_inverse_layer_idx=scale_attn_by_inverse_layer_idx, reorder_and_upcast_attn=reorder_and_upcast_attn,
        )
        self.transformer = GPT2LMHeadModel(gpt_config)

    def forward(self, input_ids: torch.Tensor, prefix_embeds: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, labels: Optional[torch.Tensor] = None):
        embedding_text = self.transformer.transformer.wte(input_ids)
        hidden = self.encode_prefix(prefix_embeds)
        prefix_embeds = self.decode_prefix(hidden)
        embedding_cat = torch.cat((prefix_embeds, embedding_text), dim=1)

        if labels is not None:
            dummy_token = self.get_dummy_token(input_ids.shape[0], input_ids.device)
            labels = torch.cat((dummy_token, input_ids), dim=1)
        out = self.transformer(inputs_embeds=embedding_cat, labels=labels, attention_mask=attention_mask)
        if self.prefix_hidden_dim is not None:
            return out, hidden
        else:
            return out

    def get_dummy_token(self, batch_size: int, device: torch.device) -> torch.Tensor:
        return torch.zeros(batch_size, self.prefix_length, dtype=torch.int64, device=device)

    def encode(self, prefix):
        return self.encode_prefix(prefix)

    @torch.no_grad()
    def generate_captions(self, features, eos_token_id, device):
        features = torch.split(features, 1, dim=0)
        generated_tokens = []
        generated_seq_lengths = []
        for feature in features:
            feature = self.decode_prefix(feature.to(device))  # back to the clip feature
            # Only support beam search for now
            output_tokens, seq_lengths = self.generate_beam(
                input_embeds=feature, device=device, eos_token_id=eos_token_id
            )
            generated_tokens.append(output_tokens[0])
            generated_seq_lengths.append(seq_lengths[0])
        generated_tokens = torch.stack(generated_tokens)
        generated_seq_lengths = torch.stack(generated_seq_lengths)
        return generated_tokens, generated_seq_lengths

    @torch.no_grad()
    def generate_beam(self, input_embeds=None, device=None, input_ids=None, beam_size: int = 5, entry_length: int = 67, temperature: float = 1.0, eos_token_id: Optional[int] = None):
        stop_token_index = eos_token_id
        tokens = None
        scores = None
        seq_lengths = torch.ones(beam_size, device=device, dtype=torch.int)
        is_stopped = torch.zeros(beam_size, device=device, dtype=torch.bool)

        if input_embeds is not None:
            generated = input_embeds
        else:
            generated = self.transformer.transformer.wte(input_ids)

        for i in range(entry_length):
            outputs = self.transformer(inputs_embeds=generated)
            logits = outputs.logits
            logits = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
            logits = logits.softmax(-1).log()
            if scores is None:
                scores, next_tokens = logits.topk(beam_size, -1)
                generated = generated.expand(beam_size, *generated.shape[1:])
                next_tokens, scores = next_tokens.permute(1, 0), scores.squeeze(0)
                if tokens is None:
                    tokens = next_tokens
                else:
                    tokens = tokens.expand(beam_size, *tokens.shape[1:])
                    tokens = torch.cat((tokens, next_tokens), dim=1)
            else:
                # Frozen beams keep accumulating a zero score so they stay comparable
                logits[is_stopped] = -float(np.inf)
                logits[is_stopped, 0] = 0
                scores_sum = scores[:, None] + logits
                seq_lengths[~is_stopped] += 1
                scores_sum_average = scores_sum / seq_lengths[:, None]
                scores_sum_average, next_tokens = scores_sum_average.view(-1).topk(beam_size, -1)
                next_tokens_source = next_tokens // scores_sum.shape[1]
                seq_lengths = seq_lengths[next_tokens_source]
                next_tokens = next_tokens % scores_sum.shape[1]
                next_tokens = next_tokens.unsqueeze(1)
                tokens = tokens[next_tokens_source]
                tokens = torch.cat((tokens, next_tokens), dim=1)
                generated = generated[next_tokens_source]
                scores = scores_sum_average * seq_lengths
                is_stopped = is_stopped[next_tokens_source]

            next_token_embed = self.transformer.transformer.wte(next_tokens.squeeze()).view(generated.shape[0], 1, -1)
            generated = torch.cat((generated, next_token_embed), dim=1)
            is_stopped = is_stopped + next_tokens.eq(stop_token_index).squeeze()
            if is_stopped.all():
                break

        scores = scores / seq_lengths
        order = scores.argsort(descending=True)
        # tokens tensors are already padded to max_seq_length
        output_texts = [tokens[i] for i in order]
        output_texts = torch.stack(output_texts, dim=0)
        seq_lengths = torch.tensor([seq_lengths[i] for i in order], dtype=seq_lengths.dtype)
        return output_texts, seq_lengths
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union
import pandas as pd
import pyarrow as pa
import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal
logger = datasets.utils.logging.get_logger(__name__)

_PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS = ["names", "prefix"]
_PANDAS_READ_CSV_DEPRECATED_PARAMETERS = ["warn_bad_lines", "error_bad_lines", "mangle_dupe_cols"]
_PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS = ["encoding_errors", "on_bad_lines"]
_PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS = ["date_format"]
@dataclass
class CsvConfig(datasets.BuilderConfig):
    """BuilderConfig for CSV."""

    sep: str = ","
    delimiter: Optional[str] = None
    header: Optional[Union[int, List[int], str]] = "infer"
    names: Optional[List[str]] = None
    column_names: Optional[List[str]] = None
    index_col: Optional[Union[int, str, List[int], List[str]]] = None
    usecols: Optional[Union[List[int], List[str]]] = None
    prefix: Optional[str] = None
    mangle_dupe_cols: bool = True
    engine: Optional[Literal["c", "python", "pyarrow"]] = None
    converters: Dict[Union[int, str], Callable[[Any], Any]] = None
    true_values: Optional[list] = None
    false_values: Optional[list] = None
    skipinitialspace: bool = False
    skiprows: Optional[Union[int, List[int]]] = None
    nrows: Optional[int] = None
    na_values: Optional[Union[str, List[str]]] = None
    keep_default_na: bool = True
    na_filter: bool = True
    verbose: bool = False
    skip_blank_lines: bool = True
    thousands: Optional[str] = None
    decimal: str = "."
    lineterminator: Optional[str] = None
    quotechar: str = '"'
    quoting: int = 0
    escapechar: Optional[str] = None
    comment: Optional[str] = None
    encoding: Optional[str] = None
    dialect: Optional[str] = None
    error_bad_lines: bool = True
    warn_bad_lines: bool = True
    skipfooter: int = 0
    doublequote: bool = True
    memory_map: bool = False
    float_precision: Optional[str] = None
    chunksize: int = 10_000
    features: Optional[datasets.Features] = None
    encoding_errors: Optional[str] = "strict"
    on_bad_lines: Literal["error", "warn", "skip"] = "error"
    date_format: Optional[str] = None

    def __post_init__(self):
        if self.delimiter is not None:
            self.sep = self.delimiter
        if self.column_names is not None:
            self.names = self.column_names

    @property
    def pd_read_csv_kwargs(self):
        pd_read_csv_kwargs = {
"""sep""": self.sep,
"""header""": self.header,
"""names""": self.names,
"""index_col""": self.index_col,
"""usecols""": self.usecols,
"""prefix""": self.prefix,
"""mangle_dupe_cols""": self.mangle_dupe_cols,
"""engine""": self.engine,
"""converters""": self.converters,
"""true_values""": self.true_values,
"""false_values""": self.false_values,
"""skipinitialspace""": self.skipinitialspace,
"""skiprows""": self.skiprows,
"""nrows""": self.nrows,
"""na_values""": self.na_values,
"""keep_default_na""": self.keep_default_na,
"""na_filter""": self.na_filter,
"""verbose""": self.verbose,
"""skip_blank_lines""": self.skip_blank_lines,
"""thousands""": self.thousands,
"""decimal""": self.decimal,
"""lineterminator""": self.lineterminator,
"""quotechar""": self.quotechar,
"""quoting""": self.quoting,
"""escapechar""": self.escapechar,
"""comment""": self.comment,
"""encoding""": self.encoding,
"""dialect""": self.dialect,
"""error_bad_lines""": self.error_bad_lines,
"""warn_bad_lines""": self.warn_bad_lines,
"""skipfooter""": self.skipfooter,
"""doublequote""": self.doublequote,
"""memory_map""": self.memory_map,
"""float_precision""": self.float_precision,
"""chunksize""": self.chunksize,
"""encoding_errors""": self.encoding_errors,
"""on_bad_lines""": self.on_bad_lines,
"""date_format""": self.date_format,
}
# some kwargs must not be passed if they don't have a default value
# some others are deprecated and we can also not pass them if they are the default value
for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
            if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig(), pd_read_csv_parameter):
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 2.0 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 2):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 1.3 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
return pd_read_csv_kwargs
class Csv(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = CsvConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in datafiles"""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            schema = self.config.features.arrow_schema
            if all(not require_storage_cast(feature) for feature in self.config.features.values()):
                # cheaper cast
                pa_table = pa.Table.from_arrays([pa_table[field.name] for field in schema], schema=schema)
            else:
                # more expensive cast; allows str <-> int/float or str to Audio for example
                pa_table = table_cast(pa_table, schema)
        return pa_table

    def _generate_tables(self, files):
        schema = self.config.features.arrow_schema if self.config.features else None
        # dtype allows reading an int column as str
        dtype = (
            {
                name: dtype.to_pandas_dtype() if not require_storage_cast(feature) else object
                for name, dtype, feature in zip(schema.names, schema.types, self.config.features.values())
            }
            if schema is not None
            else None
        )
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            csv_file_reader = pd.read_csv(file, iterator=True, dtype=dtype, **self.config.pd_read_csv_kwargs)
            try:
                for batch_idx, df in enumerate(csv_file_reader):
                    pa_table = pa.Table.from_pandas(df)
                    # Uncomment for debugging (will print the Arrow table size and elements)
                    # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                    # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                    yield (file_idx, batch_idx), self._cast_table(pa_table)
            except ValueError as e:
                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                raise
import random
def rabin_miller(num: int) -> bool:
    """Probabilistic Rabin-Miller primality test with 5 random witnesses."""
    s = num - 1
    t = 0
    while s % 2 == 0:
        s = s // 2
        t += 1
    for _ in range(5):
        a = random.randrange(2, num - 1)
        v = pow(a, s, num)
        if v != 1:
            i = 0
            while v != (num - 1):
                if i == t - 1:
                    return False
                else:
                    i = i + 1
                    v = (v**2) % num
    return True
def is_prime_low_num(num: int) -> bool:
    """Cheap trial division against small primes before falling back to Rabin-Miller."""
    if num < 2:
        return False
    low_primes = [
2,
3,
5,
7,
11,
13,
17,
19,
23,
29,
31,
37,
41,
43,
47,
53,
59,
61,
67,
71,
73,
79,
83,
89,
97,
101,
103,
107,
109,
113,
127,
131,
137,
139,
149,
151,
157,
163,
167,
173,
179,
181,
191,
193,
197,
199,
211,
223,
227,
229,
233,
239,
241,
251,
257,
263,
269,
271,
277,
281,
283,
293,
307,
311,
313,
317,
331,
337,
347,
349,
353,
359,
367,
373,
379,
383,
389,
397,
401,
409,
419,
421,
431,
433,
439,
443,
449,
457,
461,
463,
467,
479,
487,
491,
499,
503,
509,
521,
523,
541,
547,
557,
563,
569,
571,
577,
587,
593,
599,
601,
607,
613,
617,
619,
631,
641,
643,
647,
653,
659,
661,
673,
677,
683,
691,
701,
709,
719,
727,
733,
739,
743,
751,
757,
761,
769,
773,
787,
797,
809,
811,
821,
823,
827,
829,
839,
853,
857,
859,
863,
877,
881,
883,
887,
907,
911,
919,
929,
937,
941,
947,
953,
967,
971,
977,
983,
991,
997,
]
if num in low_primes:
return True
for prime in low_primes:
if (num % prime) == 0:
return False
    return rabin_miller(num)
def generate_large_prime(keysize: int = 1024) -> int:
    """Generate a random probable prime with `keysize` bits."""
    while True:
        num = random.randrange(2 ** (keysize - 1), 2 ** (keysize))
        if is_prime_low_num(num):
            return num
if __name__ == "__main__":
    num = generate_large_prime()
print(("""Prime number:""", num))
print(("""is_prime_low_num:""", is_prime_low_num(num)))
import tempfile
import unittest
from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch
TINY_BART = "sshleifer/bart-tiny-random"
TINY_T5 = "patrickvonplaten/t5-tiny-random"


@require_torch
class MakeStudentTester(unittest.TestCase):
    @cached_property
    def teacher_config(self):
        return AutoConfig.from_pretrained(TINY_BART)

    def test_valid_t5(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_T5, tempfile.mkdtemp(), e=1, d=1)
        self.assertEqual(student.config.num_hidden_layers, 1)

    def test_asymmetric_t5(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_T5, tempfile.mkdtemp(), e=1, d=None)

    def test_same_decoder_small_encoder(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=1, d=None)
        self.assertEqual(student.config.encoder_layers, 1)
        self.assertEqual(student.config.decoder_layers, self.teacher_config.encoder_layers)

    def test_small_enc_small_dec(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=1, d=1)
        self.assertEqual(student.config.encoder_layers, 1)
        self.assertEqual(student.config.decoder_layers, 1)

    def test_raises_assert(self):
        with self.assertRaises(AssertionError):
            create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=None, d=None)
from ....configuration_utils import PretrainedConfig
from ....utils import logging
logger = logging.get_logger(__name__)

MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "speechbrain/m-ctc-t-large": "https://huggingface.co/speechbrain/m-ctc-t-large/resolve/main/config.json",
    # See all M-CTC-T models at https://huggingface.co/models?filter=mctct
}


class MCTCTConfig(PretrainedConfig):
    model_type = "mctct"

    def __init__(self, vocab_size=8065, hidden_size=1536, num_hidden_layers=36, intermediate_size=6144, num_attention_heads=4, attention_head_dim=384, max_position_embeddings=920, layer_norm_eps=1e-5, layerdrop=0.3, hidden_act="relu", initializer_range=0.02, hidden_dropout_prob=0.3, attention_probs_dropout_prob=0.3, pad_token_id=1, bos_token_id=0, eos_token_id=2, conv_glu_dim=1, conv_dropout=0.3, num_conv_layers=1, conv_kernel=(7,), conv_stride=(3,), input_feat_per_channel=80, input_channels=1, conv_channels=None, ctc_loss_reduction="sum", ctc_zero_infinity=False, **kwargs):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        self.max_position_embeddings = max_position_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.layerdrop = layerdrop
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.conv_glu_dim = conv_glu_dim
        self.conv_dropout = conv_dropout
        self.num_conv_layers = num_conv_layers
        self.input_feat_per_channel = input_feat_per_channel
        self.input_channels = input_channels
        self.conv_channels = conv_channels
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
        # prevents config testing fail with exporting to json
        self.conv_kernel = list(conv_kernel)
        self.conv_stride = list(conv_stride)

        if len(self.conv_kernel) != self.num_conv_layers:
            raise ValueError(
                "Configuration for convolutional module is incorrect. "
                "It is required that `len(config.conv_kernel)` == `config.num_conv_layers` "
                f"but is `len(config.conv_kernel) = {len(self.conv_kernel)}`, "
                f"`config.num_conv_layers = {self.num_conv_layers}`."
            )
'''simple docstring'''
import pytest
from datasets.parallel import ParallelBackendConfig, parallel_backend
from datasets.utils.py_utils import map_nested
from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows
def add_one(i):  # picklable for multiprocessing
    return i + 1


@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
def test_parallel_backend_input():
    with parallel_backend("spark"):
        assert ParallelBackendConfig.backend_name == "spark"

    lst = [1, 2, 3]
    with pytest.raises(ValueError):
        with parallel_backend("unsupported backend"):
            map_nested(add_one, lst, num_proc=2)

    with pytest.raises(ValueError):
        with parallel_backend("unsupported backend"):
            map_nested(add_one, lst, num_proc=-1)


@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
@pytest.mark.parametrize("num_proc", [2, -1])
def test_parallel_backend_map_nested(num_proc):
    s1 = [1, 2]
    s2 = {"a": 1, "b": 2}
    s3 = {"a": [1, 2], "b": [3, 4]}
    s4 = {"a": {"1": 1}, "b": 2}
    s5 = {"a": 1, "b": 2, "c": 3, "d": 4}
    expected_map_nested_s1 = [2, 3]
    expected_map_nested_s2 = {"a": 2, "b": 3}
    expected_map_nested_s3 = {"a": [2, 3], "b": [4, 5]}
    expected_map_nested_s4 = {"a": {"1": 2}, "b": 3}
    expected_map_nested_s5 = {"a": 2, "b": 3, "c": 4, "d": 5}
    with parallel_backend("spark"):
        assert map_nested(add_one, s1, num_proc=num_proc) == expected_map_nested_s1
        assert map_nested(add_one, s2, num_proc=num_proc) == expected_map_nested_s2
        assert map_nested(add_one, s3, num_proc=num_proc) == expected_map_nested_s3
        assert map_nested(add_one, s4, num_proc=num_proc) == expected_map_nested_s4
        assert map_nested(add_one, s5, num_proc=num_proc) == expected_map_nested_s5
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {"configuration_vit_mae": ["VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMAEConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vit_mae"] = [
'''VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ViTMAEForPreTraining''',
'''ViTMAELayer''',
'''ViTMAEModel''',
'''ViTMAEPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_vit_mae"] = [
'''TFViTMAEForPreTraining''',
'''TFViTMAEModel''',
'''TFViTMAEPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_mae import (
VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMAEForPreTraining,
ViTMAELayer,
ViTMAEModel,
ViTMAEPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
import doctest
import glob
import importlib
import inspect
import os
import re
from contextlib import contextmanager
from functools import wraps
from unittest.mock import patch
import numpy as np
import pytest
from absl.testing import parameterized
import datasets
from datasets import load_metric
from .utils import for_all_test_methods, local, slow
# mark all tests as integration
pytestmark = pytest.mark.integration

REQUIRE_FAIRSEQ = {"comet"}
_has_fairseq = importlib.util.find_spec("fairseq") is not None

UNSUPPORTED_ON_WINDOWS = {"code_eval"}
_on_windows = os.name == "nt"

REQUIRE_TRANSFORMERS = {"bertscore", "frugalscore", "perplexity"}
_has_transformers = importlib.util.find_spec("transformers") is not None
def skip_if_metric_requires_fairseq(test_case):
    @wraps(test_case)
    def wrapper(self, metric_name):
        if not _has_fairseq and metric_name in REQUIRE_FAIRSEQ:
            self.skipTest('"test requires Fairseq"')
        else:
            test_case(self, metric_name)

    return wrapper


def skip_if_metric_requires_transformers(test_case):
    @wraps(test_case)
    def wrapper(self, metric_name):
        if not _has_transformers and metric_name in REQUIRE_TRANSFORMERS:
            self.skipTest('"test requires transformers"')
        else:
            test_case(self, metric_name)

    return wrapper


def skip_on_windows_if_not_windows_compatible(test_case):
    @wraps(test_case)
    def wrapper(self, metric_name):
        if _on_windows and metric_name in UNSUPPORTED_ON_WINDOWS:
            self.skipTest('"test not supported on Windows"')
        else:
            test_case(self, metric_name)

    return wrapper


def get_local_metric_names():
    metrics = [metric_dir.split(os.sep)[-2] for metric_dir in glob.glob("./metrics/*/")]
    return [{"testcase_name": x, "metric_name": x} for x in metrics if x != "gleu"]  # gleu is unfinished


@parameterized.named_parameters(get_local_metric_names())
@for_all_test_methods(
    skip_if_metric_requires_fairseq, skip_if_metric_requires_transformers, skip_on_windows_if_not_windows_compatible
)
@local
class LocalMetricTest(parameterized.TestCase):
    INTENSIVE_CALLS_PATCHER = {}
    metric_name = None
@pytest.mark.filterwarnings("""ignore:metric_module_factory is deprecated:FutureWarning""" )
@pytest.mark.filterwarnings("""ignore:load_metric is deprecated:FutureWarning""" )
def a_ ( self : int , A__ : List[str] ):
"""simple docstring"""
__lowerCamelCase : Optional[int] = """[...]"""
__lowerCamelCase : Optional[Any] = importlib.import_module(
datasets.load.metric_module_factory(os.path.join("""metrics""" , A__ ) ).module_path )
__lowerCamelCase : int = datasets.load.import_main_class(metric_module.__name__ , dataset=A__ )
# check parameters
__lowerCamelCase : Optional[Any] = inspect.signature(metric._compute ).parameters
self.assertTrue(all(p.kind != p.VAR_KEYWORD for p in parameters.values() ) ) # no **kwargs
# run doctest
with self.patch_intensive_calls(A__ , metric_module.__name__ ):
with self.use_local_metrics():
try:
__lowerCamelCase : Any = doctest.testmod(A__ , verbose=A__ , raise_on_error=A__ )
except doctest.UnexpectedException as e:
raise e.exc_info[1] # raise the exception that doctest caught
self.assertEqual(results.failed , 0 )
self.assertGreater(results.attempted , 1 )
@slow
def a_ ( self : Optional[int] , A__ : Any ):
"""simple docstring"""
__lowerCamelCase : List[Any] = """[...]"""
__lowerCamelCase : List[Any] = importlib.import_module(
datasets.load.metric_module_factory(os.path.join("""metrics""" , A__ ) ).module_path )
# run doctest
with self.use_local_metrics():
__lowerCamelCase : int = doctest.testmod(A__ , verbose=A__ , raise_on_error=A__ )
self.assertEqual(results.failed , 0 )
self.assertGreater(results.attempted , 1 )
    @contextmanager
    def patch_intensive_calls(self, metric_name, module_name):
        if metric_name in self.INTENSIVE_CALLS_PATCHER:
            with self.INTENSIVE_CALLS_PATCHER[metric_name](module_name):
                yield
        else:
            yield

    @contextmanager
    def use_local_metrics(self):
        def load_local_metric(metric_name, *args, **kwargs):
            return load_metric(os.path.join("metrics", metric_name), *args, **kwargs)

        with patch("datasets.load_metric") as mock_load_metric:
            mock_load_metric.side_effect = load_local_metric
            yield

    @classmethod
    def register_intensive_calls_patcher(cls, metric_name):
        def wrapper(patcher):
            patcher = contextmanager(patcher)
            cls.INTENSIVE_CALLS_PATCHER[metric_name] = patcher
            return patcher

        return wrapper
@LocalMetricTest.register_intensive_calls_patcher("""bleurt""" )
def __lowercase (_lowercase ) -> Optional[Any]:
"""simple docstring"""
import tensorflow.compat.va as tf
from bleurt.score import Predictor
tf.flags.DEFINE_string("""sv""", """""", """""" ) # handle pytest cli flags
class SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ):
def a_ ( self : Union[str, Any] , A__ : Dict ):
"""simple docstring"""
assert len(input_dict["""input_ids"""] ) == 2
return np.array([1.03, 1.04] )
# mock predict_fn which is supposed to do a forward pass with a bleurt model
with patch("""bleurt.score._create_predictor""" ) as mock_create_predictor:
__lowerCamelCase : Optional[Any] = MockedPredictor()
yield
@LocalMetricTest.register_intensive_calls_patcher("""bertscore""" )
def __lowercase (_lowercase ) -> List[str]:
"""simple docstring"""
import torch
def bert_cos_score_idf(_lowercase, _lowercase, *_lowercase, **_lowercase ):
return torch.tensor([[1.0, 1.0, 1.0]] * len(_lowercase ) )
# mock get_model which is supposed to do download a bert model
# mock bert_cos_score_idf which is supposed to do a forward pass with a bert model
with patch("""bert_score.scorer.get_model""" ), patch(
"""bert_score.scorer.bert_cos_score_idf""" ) as mock_bert_cos_score_idf:
__lowerCamelCase : Tuple = bert_cos_score_idf
yield
@LocalMetricTest.register_intensive_calls_patcher("""comet""" )
def __lowercase (_lowercase ) -> int:
"""simple docstring"""
def load_from_checkpoint(_lowercase ):
class SCREAMING_SNAKE_CASE :
def a_ ( self : Dict , A__ : Dict , *A__ : Dict , **A__ : Optional[int] ):
"""simple docstring"""
assert len(A__ ) == 2
__lowerCamelCase : Any = [0.19, 0.92]
return scores, sum(A__ ) / len(A__ )
return Model()
# mock load_from_checkpoint which is supposed to do download a bert model
# mock load_from_checkpoint which is supposed to do download a bert model
with patch("""comet.download_model""" ) as mock_download_model:
__lowerCamelCase : Dict = None
with patch("""comet.load_from_checkpoint""" ) as mock_load_from_checkpoint:
__lowerCamelCase : str = load_from_checkpoint
yield
def test_seqeval_raises_when_incorrect_scheme():
    metric = load_metric(os.path.join("metrics", "seqeval"))
    wrong_scheme = "ERROR"
    error_message = f"Scheme should be one of [IOB1, IOB2, IOE1, IOE2, IOBES, BILOU], got {wrong_scheme}"
    with pytest.raises(ValueError, match=re.escape(error_message)):
        metric.compute(predictions=[], references=[], scheme=wrong_scheme)
'''simple docstring'''
from __future__ import annotations
from random import random
class Node:
    """Treap node: stores a value plus a random heap priority."""

    def __init__(self, value: int | None = None):
        self.value = value
        self.prior = random()
        self.left: Node | None = None
        self.right: Node | None = None

    def __repr__(self) -> str:
        from pprint import pformat

        if self.left is None and self.right is None:
            return f"'{self.value}: {self.prior:.5}'"
        else:
            return pformat({f"{self.value}: {self.prior:.5}": (self.left, self.right)}, indent=1)

    def __str__(self) -> str:
        value = str(self.value) + " "
        left = str(self.left or "")
        right = str(self.right or "")
        return value + left + right


def split(root: Node | None, value: int) -> tuple[Node | None, Node | None]:
    """Split the treap into (keys <= value, keys > value)."""
    if root is None:  # None tree is split into 2 Nones
        return None, None
    elif root.value is None:
        return None, None
    else:
        if value < root.value:
            left, root.left = split(root.left, value)
            return left, root
        else:
            root.right, right = split(root.right, value)
            return root, right


def merge(left: Node | None, right: Node | None) -> Node | None:
    """Merge two treaps; every key in `left` must not exceed any key in `right`."""
    if (not left) or (not right):  # If one node is None, return the other
        return left or right
    elif left.prior < right.prior:
        left.right = merge(left.right, right)
        return left
    else:
        right.left = merge(left, right.left)
        return right


def insert(root: Node | None, value: int) -> Node | None:
    node = Node(value)
    left, right = split(root, value)
    return merge(merge(left, node), right)


def erase(root: Node | None, value: int) -> Node | None:
    left, right = split(root, value - 1)
    _, right = split(right, value)
    return merge(left, right)


def inorder(root: Node | None) -> None:
    if not root:  # None
        return
    else:
        inorder(root.left)
        print(root.value, end=",")
        inorder(root.right)


def interact_treap(root: Node | None, args: str) -> Node | None:
    for arg in args.split():
        if arg[0] == "+":
            root = insert(root, int(arg[1:]))
        elif arg[0] == "-":
            root = erase(root, int(arg[1:]))
        else:
            print("Unknown command")
    return root


def main() -> None:
    root = None
    print(
        "enter numbers to create a tree, + value to add value into treap, "
        "- value to erase all nodes with value. 'q' to quit. "
    )
    args = input()
    while args != "q":
        root = interact_treap(root, args)
        print(root)
        args = input()
    print("good bye!")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
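# Non-interactive usage sketch:
#   root = None
#   for v in (5, 3, 9, 3):
#       root = insert(root, v)
#   root = erase(root, 3)  # removes every node holding 3
#   inorder(root)          # prints: 5,9,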
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "tokenizer_file": {
        "EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "gpt-neox-20b": 2048,
}


class GPTNeoXTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, unk_token="<|endoftext|>", bos_token="<|endoftext|>", eos_token="<|endoftext|>", add_prefix_space=False, **kwargs):
        super().__init__(
            vocab_file, merges_file, tokenizer_file=tokenizer_file, unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, add_prefix_space=add_prefix_space, **kwargs,
        )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
'''simple docstring'''
import argparse
import logging
import os
import re
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
DataCollatorForLanguageModeling,
PushToHubCallback,
TFAutoModelForMaskedLM,
create_optimizer,
)
logger = logging.getLogger(__name__)

AUTO = tf.data.AUTOTUNE


def parse_args():
    parser = argparse.ArgumentParser(description="Train a masked language model on TPU.")
    parser.add_argument(
        "--pretrained_model_config", type=str, default="roberta-base",
        help="The model config to use. Note that we don't copy the model's weights, only the config!",
    )
    parser.add_argument(
        "--tokenizer", type=str, default="unigram-tokenizer-wikitext",
        help="The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model's vocab size.",
    )
    parser.add_argument("--per_replica_batch_size", type=int, default=8, help="Batch size per TPU core.")
    parser.add_argument(
        "--no_tpu", action="store_true",
        help="If set, run on CPU and don't try to initialize a TPU. Useful for debugging on non-TPU instances.",
    )
    parser.add_argument(
        "--tpu_name", type=str, default="local",
        help="Name of TPU resource to initialize. Should be blank on Colab, and 'local' on TPU VMs.",
    )
    parser.add_argument(
        "--tpu_zone", type=str,
        help="Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes.",
    )
    parser.add_argument("--gcp_project", type=str, help="Google cloud project name. Only used for non-Colab TPU nodes.")
    parser.add_argument(
        "--bfloat16", action="store_true",
        help="Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU.",
    )
    parser.add_argument(
        "--train_dataset", type=str,
        help="Path to training dataset to load. If the path begins with `gs://`"
        " then the dataset will be loaded from a Google Cloud Storage bucket.",
    )
    parser.add_argument("--shuffle_buffer_size", type=int, default=2**18, help="Size of the shuffle buffer (in samples)")
    parser.add_argument(
        "--eval_dataset", type=str,
        help="Path to evaluation dataset to load. If the path begins with `gs://`"
        " then the dataset will be loaded from a Google Cloud Storage bucket.",
    )
    parser.add_argument("--num_epochs", type=int, default=1, help="Number of epochs to train for.")
    parser.add_argument("--learning_rate", type=float, default=1e-4, help="Learning rate to use for training.")
    parser.add_argument("--weight_decay_rate", type=float, default=1e-3, help="Weight decay rate to use for training.")
    parser.add_argument(
        "--max_length", type=int, default=512,
        help="Maximum length of tokenized sequences. Should match the setting used in prepare_tfrecord_shards.py",
    )
    parser.add_argument("--mlm_probability", type=float, default=0.15, help="Fraction of tokens to mask during training.")
    parser.add_argument("--output_dir", type=str, required=True, help="Path to save model checkpoints to.")
    parser.add_argument("--hub_model_id", type=str, help="Model ID to upload to on the Hugging Face Hub.")
    args = parser.parse_args()
    return args


def initialize_tpu(args):
"""simple docstring"""
    try:
        if args.tpu_name:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver(
                args.tpu_name, zone=args.tpu_zone, project=args.gcp_project
            )
        else:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
    except ValueError:
        raise RuntimeError(
            "Couldn't connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or "
            "--gcp_project. When running on a TPU VM, use --tpu_name local."
        )
    tf.config.experimental_connect_to_cluster(tpu)
    tf.tpu.experimental.initialize_tpu_system(tpu)
    return tpu
def count_samples(file_list):
    """Sum the per-shard sample counts encoded in the tfrecord filenames."""
    num_samples = 0
    for file in file_list:
        filename = file.split("/")[-1]
        sample_count = re.search(r"-\d+-(\d+)\.tfrecord", filename).group(1)
        sample_count = int(sample_count)
        num_samples += sample_count
    return num_samples
def prepare_dataset(records, decode_fn, mask_fn, batch_size, shuffle, shuffle_buffer_size=None):
    num_samples = count_samples(records)
    dataset = tf.data.Dataset.from_tensor_slices(records)
    if shuffle:
        dataset = dataset.shuffle(len(records))
    dataset = tf.data.TFRecordDataset(dataset, num_parallel_reads=AUTO)
    # TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here
    dataset = dataset.apply(tf.data.experimental.assert_cardinality(num_samples))
    dataset = dataset.map(decode_fn, num_parallel_calls=AUTO)
    if shuffle:
        assert shuffle_buffer_size is not None
        dataset = dataset.shuffle(shuffle_buffer_size)
    dataset = dataset.batch(batch_size, drop_remainder=True)
    dataset = dataset.map(mask_fn, num_parallel_calls=AUTO)
    dataset = dataset.prefetch(AUTO)
    return dataset
def main(args):
    if not args.no_tpu:
        tpu = initialize_tpu(args)
        strategy = tf.distribute.TPUStrategy(tpu)
    else:
        strategy = tf.distribute.OneDeviceStrategy(device="/gpu:0")

    if args.bfloat16:
        tf.keras.mixed_precision.set_global_policy("mixed_bfloat16")

    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer)
    config = AutoConfig.from_pretrained(args.pretrained_model_config)
    config.vocab_size = tokenizer.vocab_size

    training_records = tf.io.gfile.glob(os.path.join(args.train_dataset, "*.tfrecord"))
    if not training_records:
        raise ValueError(f"No .tfrecord files found in {args.train_dataset}.")

    eval_records = tf.io.gfile.glob(os.path.join(args.eval_dataset, "*.tfrecord"))
    if not eval_records:
        raise ValueError(f"No .tfrecord files found in {args.eval_dataset}.")

    num_train_samples = count_samples(training_records)
    steps_per_epoch = num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync)
    total_train_steps = steps_per_epoch * args.num_epochs

    with strategy.scope():
        model = TFAutoModelForMaskedLM.from_config(config)
        model(model.dummy_inputs)  # Pass some dummy inputs through the model to ensure all the weights are built
        optimizer, schedule = create_optimizer(
            num_train_steps=total_train_steps, num_warmup_steps=total_train_steps // 20, init_lr=args.learning_rate, weight_decay_rate=args.weight_decay_rate,
        )
        # Transformers models compute the right loss for their task by default when labels are passed, and will
        # use this for training unless you specify your own loss function in compile().
        model.compile(optimizer=optimizer, metrics=["accuracy"])

    def decode_fn(example):
        features = {
            "input_ids": tf.io.FixedLenFeature(dtype=tf.int64, shape=(args.max_length,)),
            "attention_mask": tf.io.FixedLenFeature(dtype=tf.int64, shape=(args.max_length,)),
        }
        return tf.io.parse_single_example(example, features)

    # Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can
    # use their methods in our data pipeline.
    data_collator = DataCollatorForLanguageModeling(
        tokenizer=tokenizer, mlm_probability=args.mlm_probability, mlm=True, return_tensors="tf"
    )

    def mask_with_collator(batch):
        # TF really needs an isin() function
        special_tokens_mask = (
            ~tf.cast(batch["attention_mask"], tf.bool)
            | (batch["input_ids"] == tokenizer.cls_token_id)
            | (batch["input_ids"] == tokenizer.sep_token_id)
        )
        batch["input_ids"], batch["labels"] = data_collator.tf_mask_tokens(
            batch["input_ids"], vocab_size=len(tokenizer), mask_token_id=tokenizer.mask_token_id, special_tokens_mask=special_tokens_mask,
        )
        return batch

    batch_size = args.per_replica_batch_size * strategy.num_replicas_in_sync

    train_dataset = prepare_dataset(
        training_records, decode_fn=decode_fn, mask_fn=mask_with_collator, batch_size=batch_size, shuffle=True, shuffle_buffer_size=args.shuffle_buffer_size,
    )

    eval_dataset = prepare_dataset(
        eval_records, decode_fn=decode_fn, mask_fn=mask_with_collator, batch_size=batch_size, shuffle=False,
    )

    callbacks = []
    if args.hub_model_id:
        callbacks.append(
            PushToHubCallback(output_dir=args.output_dir, hub_model_id=args.hub_model_id, tokenizer=tokenizer)
        )

    model.fit(
        train_dataset, validation_data=eval_dataset, epochs=args.num_epochs, callbacks=callbacks,
    )

    model.save_pretrained(args.output_dir)
if __name__ == "__main__":
lowerCamelCase = parse_args()
main(args)
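
# Writer-side sketch (illustrative addition, not part of the original script):
# decode_fn above expects fixed-length int64 `input_ids` and `attention_mask`
# features, so a compatible record could be serialized like this:
#
#     def serialize_example(input_ids, attention_mask):
#         feature = {
#             "input_ids": tf.train.Feature(int64_list=tf.train.Int64List(value=input_ids)),
#             "attention_mask": tf.train.Feature(int64_list=tf.train.Int64List(value=attention_mask)),
#         }
#         return tf.train.Example(features=tf.train.Features(feature=feature)).SerializeToString()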
| 474
| 1
|
"""simple docstring"""
from ..utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_pt_objects import * # noqa F403
else:
from .scheduling_consistency_models import CMStochasticIterativeScheduler
from .scheduling_ddim import DDIMScheduler
from .scheduling_ddim_inverse import DDIMInverseScheduler
from .scheduling_ddim_parallel import DDIMParallelScheduler
from .scheduling_ddpm import DDPMScheduler
from .scheduling_ddpm_parallel import DDPMParallelScheduler
from .scheduling_deis_multistep import DEISMultistepScheduler
from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler
from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler
from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler
from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler
from .scheduling_euler_discrete import EulerDiscreteScheduler
from .scheduling_heun_discrete import HeunDiscreteScheduler
from .scheduling_ipndm import IPNDMScheduler
    from .scheduling_k_dpm_2_ancestral_discrete import KDPM2AncestralDiscreteScheduler
    from .scheduling_k_dpm_2_discrete import KDPM2DiscreteScheduler
from .scheduling_karras_ve import KarrasVeScheduler
from .scheduling_pndm import PNDMScheduler
from .scheduling_repaint import RePaintScheduler
from .scheduling_sde_ve import ScoreSdeVeScheduler
from .scheduling_sde_vp import ScoreSdeVpScheduler
from .scheduling_unclip import UnCLIPScheduler
from .scheduling_unipc_multistep import UniPCMultistepScheduler
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin
from .scheduling_vq_diffusion import VQDiffusionScheduler
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_flax_objects import * # noqa F403
else:
from .scheduling_ddim_flax import FlaxDDIMScheduler
from .scheduling_ddpm_flax import FlaxDDPMScheduler
from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler
from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler
from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler
from .scheduling_pndm_flax import FlaxPNDMScheduler
from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler
from .scheduling_utils_flax import (
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
broadcast_to_shape_from_left,
)
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .scheduling_lms_discrete import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
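
# Consumer-side sketch (illustrative addition, mirroring the availability guards
# above): pick the SDE-based solver only when its optional torchsde backend is
# installed, otherwise fall back to a pure-torch scheduler.
#
#     from diffusers.utils import is_torchsde_available
#
#     if is_torchsde_available():
#         from diffusers.schedulers import DPMSolverSDEScheduler as SchedulerCls
#     else:
#         from diffusers.schedulers import DPMSolverMultistepScheduler as SchedulerCls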
| 713
|
"""simple docstring"""
from math import sqrt
def is_prime(number: int) -> bool:
    """Trial division using the fact that primes above 3 have the form 6k +/- 1."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, and all multiples of 3 are not primes
        return False

    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(nth: int = 10_001) -> int:
    """Return the nth prime number (Project Euler problem 7)."""
    count = 0
    number = 1
    while count != nth and number < 3:
        number += 1
        if is_prime(number):
            count += 1
    while count != nth:
        number += 2
        if is_prime(number):
            count += 1
    return number
if __name__ == "__main__":
print(f'{solution() = }')
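
# Sanity check of the 6k +/- 1 fact used in is_prime (added for illustration):
# every prime above 3 leaves remainder 1 or 5 modulo 6.
assert all(p % 6 in (1, 5) for p in [5, 7, 11, 13, 17, 19, 23, 29, 31, 37])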
| 12
| 0
|
from __future__ import annotations
from math import pi
from typing import Protocol
import matplotlib.pyplot as plt
import numpy as np
class FilterType(Protocol):
    def process(self, sample: float) -> float:
        return 0.0


def get_bounds(fft_results: np.ndarray, samplerate: int) -> tuple[int | float, int | float]:
    lowest = min([-20, np.min(fft_results[1 : samplerate // 2 - 1])])
    highest = max([20, np.max(fft_results[1 : samplerate // 2 - 1])])
    return lowest, highest


def show_frequency_response(filter_type: FilterType, samplerate: int) -> None:
    """Plot the gain (in dB) of a filter's frequency response from its impulse response."""
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]

    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.abs(np.fft.fft(outputs))
    fft_db = 20 * np.log10(fft_out)

    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")

    # Display within reasonable bounds
    bounds = get_bounds(fft_db, samplerate)
    plt.ylim(max([-80, bounds[0]]), min([80, bounds[1]]))
    plt.ylabel("Gain (dB)")

    plt.plot(fft_db)
    plt.show()


def show_phase_response(filter_type: FilterType, samplerate: int) -> None:
    """Plot the phase shift (in radians) of a filter's frequency response."""
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]

    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_phase = np.angle(np.fft.fft(outputs))

    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")

    plt.ylim(-2 * pi, 2 * pi)
    plt.ylabel("Phase shift (Radians)")
    plt.plot(np.unwrap(fft_phase, -2 * pi))
    plt.show()
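
# Minimal FilterType implementation to exercise the two plots above (an
# illustrative sketch, not part of the original module): a one-pole low-pass
# filter y[n] = a*x[n] + (1 - a)*y[n-1].
class SimpleLowPass:
    def __init__(self, a: float = 0.1) -> None:
        self.a = a
        self.prev = 0.0

    def process(self, sample: float) -> float:
        self.prev = self.a * sample + (1 - self.a) * self.prev
        return self.prev


# show_frequency_response(SimpleLowPass(), 48_000)
# show_phase_response(SimpleLowPass(), 48_000)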
| 68
|
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPSegProcessor, ViTImageProcessor
@require_vision
class CLIPSegProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Create a list of PIL images from random uint8 arrays."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = CLIPSegProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = CLIPSegProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = CLIPSegProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = CLIPSegProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, ViTImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, ViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = CLIPSegProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = CLIPSegProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_processor_visual_prompt(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()
        visual_prompt_input = self.prepare_image_inputs()

        inputs = processor(images=image_input, visual_prompt=visual_prompt_input)

        self.assertListEqual(list(inputs.keys()), ["pixel_values", "conditional_pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)
| 602
| 0
|
'''simple docstring'''
def solution(n: int = 600_851_475_143) -> int:
    """Return the largest prime factor of n (Project Euler problem 3)."""
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError("Parameter n must be int or castable to int.")
    if n <= 0:
        raise ValueError("Parameter n must be greater than or equal to one.")
    ans = 1
    i = 2
    while i * i <= n:
        while n % i == 0:
            ans = i
            n //= i
        i += 1
    if n > 1:
        ans = n
    return int(ans)
if __name__ == "__main__":
print(f"""{solution() = }""")
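
# Worked check (added for illustration): 13195 = 5 * 7 * 13 * 29, the example
# from the problem statement, so its largest prime factor is 29.
assert solution(13195) == 29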
| 8
|
'''simple docstring'''
import mpmath # for roots of unity
import numpy as np
class FFT:
    """Multiply two polynomials, given as coefficient lists, via the radix-2 FFT."""

    def __init__(self, poly_a=None, poly_b=None):
        self.polyA = list(poly_a or [0])[:]
        self.polyB = list(poly_b or [0])[:]

        # Remove leading zero coefficients
        while self.polyA[-1] == 0:
            self.polyA.pop()
        self.len_A = len(self.polyA)

        while self.polyB[-1] == 0:
            self.polyB.pop()
        self.len_B = len(self.polyB)

        # Add 0 to make lengths equal a power of 2
        self.c_max_length = int(2 ** np.ceil(np.log2(len(self.polyA) + len(self.polyB) - 1)))

        while len(self.polyA) < self.c_max_length:
            self.polyA.append(0)
        while len(self.polyB) < self.c_max_length:
            self.polyB.append(0)

        # A complex root used for the fourier transform
        self.root = complex(mpmath.root(x=1, n=self.c_max_length, k=1))

        # The product
        self.product = self.__multiply()

    def __dft(self, which):
        dft = [[x] for x in self.polyA] if which == "A" else [[x] for x in self.polyB]
        # Corner case
        if len(dft) <= 1:
            return dft[0]

        next_ncol = self.c_max_length // 2
        while next_ncol > 0:
            new_dft = [[] for i in range(next_ncol)]
            root = self.root**next_ncol

            # First half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2)):
                for i in range(next_ncol):
                    new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j])
                current_root *= root
            # Second half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2)):
                for i in range(next_ncol):
                    new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j])
                current_root *= root
            # Update
            dft = new_dft
            next_ncol = next_ncol // 2
        return dft[0]

    def __multiply(self):
        dft_a = self.__dft("A")
        dft_b = self.__dft("B")
        inverce_c = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length)]]
        del dft_a
        del dft_b

        # Corner Case
        if len(inverce_c[0]) <= 1:
            return inverce_c[0]
        # Inverse DFT
        next_ncol = 2
        while next_ncol <= self.c_max_length:
            new_inverse_c = [[] for i in range(next_ncol)]
            root = self.root ** (next_ncol // 2)
            current_root = 1
            # First half of next step
            for j in range(self.c_max_length // next_ncol):
                for i in range(next_ncol // 2):
                    # Even positions
                    new_inverse_c[i].append(
                        (inverce_c[i][j] + inverce_c[i][j + self.c_max_length // next_ncol]) / 2
                    )
                    # Odd positions
                    new_inverse_c[i + next_ncol // 2].append(
                        (inverce_c[i][j] - inverce_c[i][j + self.c_max_length // next_ncol]) / (2 * current_root)
                    )
                current_root *= root
            # Update
            inverce_c = new_inverse_c
            next_ncol *= 2
        # Unpack
        inverce_c = [round(x[0].real, 8) + round(x[0].imag, 8) * 1j for x in inverce_c]

        # Remove leading 0's
        while inverce_c[-1] == 0:
            inverce_c.pop()
        return inverce_c

    # Overwrite __str__ for print(); shows A, B and A*B
    def __str__(self):
        a = "A = " + " + ".join(f"{coef}*x^{i}" for i, coef in enumerate(self.polyA[: self.len_A]))
        b = "B = " + " + ".join(f"{coef}*x^{i}" for i, coef in enumerate(self.polyB[: self.len_B]))
        c = "A*B = " + " + ".join(f"{coef}*x^{i}" for i, coef in enumerate(self.product))
        return f"{a}\n{b}\n{c}"
# Unit tests
if __name__ == "__main__":
import doctest
doctest.testmod()
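
# Worked example (added for illustration): multiplying A(x) = 1 + 2x + 3x^2 by
# B(x) = 4 + 5x gives 4 + 13x + 22x^2 + 15x^3.
#
#     print(FFT([1, 2, 3], [4, 5]))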
| 8
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_canine": ["CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP", "CanineConfig"],
    "tokenization_canine": ["CanineTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_canine"] = [
'''CANINE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CanineForMultipleChoice''',
'''CanineForQuestionAnswering''',
'''CanineForSequenceClassification''',
'''CanineForTokenClassification''',
'''CanineLayer''',
'''CanineModel''',
'''CaninePreTrainedModel''',
'''load_tf_weights_in_canine''',
]
if TYPE_CHECKING:
from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig
from .tokenization_canine import CanineTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_canine import (
CANINE_PRETRAINED_MODEL_ARCHIVE_LIST,
CanineForMultipleChoice,
CanineForQuestionAnswering,
CanineForSequenceClassification,
CanineForTokenClassification,
CanineLayer,
CanineModel,
CaninePreTrainedModel,
load_tf_weights_in_canine,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
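
# Effect of the _LazyModule swap above (illustrative note): importing the
# package stays cheap, and each submodule is loaded on first attribute access.
#
#     from transformers.models import canine
#     tokenizer_cls = canine.CanineTokenizer  # tokenization_canine imported here
#     model_cls = canine.CanineModel          # modeling_canine imported here (needs torch)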
| 684
|
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class __snake_case ( unittest.TestCase ):
def _snake_case ( self ) -> Dict:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def _snake_case ( self ) -> str:
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)

        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image
@property
def _snake_case ( self ) -> Union[str, Any]:
torch.manual_seed(0 )
        model = UNet2DConditionModel(
            block_out_channels=(32, 32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=7,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
            attention_head_dim=8,
            use_linear_projection=True,
            only_cross_attention=(True, True, False),
            num_class_embeds=100,
        )
return model
@property
def _snake_case ( self ) -> Optional[Any]:
torch.manual_seed(0 )
        model = AutoencoderKL(
            block_out_channels=[32, 32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        return model
@property
def _snake_case ( self ) -> List[str]:
torch.manual_seed(0 )
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=512,
        )
        return CLIPTextModel(config)
def _snake_case ( self ) -> Union[str, Any]:
snake_case__ = 'cpu' # ensure determinism for the device-dependent torch.Generator
snake_case__ = self.dummy_cond_unet_upscale
snake_case__ = DDPMScheduler()
snake_case__ = DDIMScheduler(prediction_type='v_prediction' )
snake_case__ = self.dummy_vae
snake_case__ = self.dummy_text_encoder
snake_case__ = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
        image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0]
        low_res_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
# make sure here that pndm scheduler skips prk
snake_case__ = StableDiffusionUpscalePipeline(
unet=UpperCamelCase_ , low_res_scheduler=UpperCamelCase_ , scheduler=UpperCamelCase_ , vae=UpperCamelCase_ , text_encoder=UpperCamelCase_ , tokenizer=UpperCamelCase_ , max_noise_level=350 , )
snake_case__ = sd_pipe.to(UpperCamelCase_ )
sd_pipe.set_progress_bar_config(disable=UpperCamelCase_ )
snake_case__ = 'A painting of a squirrel eating a burger'
snake_case__ = torch.Generator(device=UpperCamelCase_ ).manual_seed(0 )
snake_case__ = sd_pipe(
[prompt] , image=UpperCamelCase_ , generator=UpperCamelCase_ , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='np' , )
snake_case__ = output.images
snake_case__ = torch.Generator(device=UpperCamelCase_ ).manual_seed(0 )
snake_case__ = sd_pipe(
[prompt] , image=UpperCamelCase_ , generator=UpperCamelCase_ , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='np' , return_dict=UpperCamelCase_ , )[0]
snake_case__ = image[0, -3:, -3:, -1]
snake_case__ = image_from_tuple[0, -3:, -3:, -1]
snake_case__ = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
        expected_slice = np.array([0.3113, 0.3910, 0.4272, 0.4859, 0.5061, 0.4652, 0.5362, 0.5715, 0.5661])
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def _snake_case ( self ) -> List[str]:
snake_case__ = 'cpu' # ensure determinism for the device-dependent torch.Generator
snake_case__ = self.dummy_cond_unet_upscale
snake_case__ = DDPMScheduler()
snake_case__ = DDIMScheduler(prediction_type='v_prediction' )
snake_case__ = self.dummy_vae
snake_case__ = self.dummy_text_encoder
snake_case__ = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
        image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0]
        low_res_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
# make sure here that pndm scheduler skips prk
snake_case__ = StableDiffusionUpscalePipeline(
unet=UpperCamelCase_ , low_res_scheduler=UpperCamelCase_ , scheduler=UpperCamelCase_ , vae=UpperCamelCase_ , text_encoder=UpperCamelCase_ , tokenizer=UpperCamelCase_ , max_noise_level=350 , )
snake_case__ = sd_pipe.to(UpperCamelCase_ )
sd_pipe.set_progress_bar_config(disable=UpperCamelCase_ )
snake_case__ = 'A painting of a squirrel eating a burger'
snake_case__ = sd_pipe(
2 * [prompt] , image=2 * [low_res_image] , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='np' , )
snake_case__ = output.images
assert image.shape[0] == 2
snake_case__ = torch.Generator(device=UpperCamelCase_ ).manual_seed(0 )
snake_case__ = sd_pipe(
[prompt] , image=UpperCamelCase_ , generator=UpperCamelCase_ , num_images_per_prompt=2 , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='np' , )
snake_case__ = output.images
assert image.shape[0] == 2
@unittest.skipIf(torch_device != 'cuda' , 'This test requires a GPU' )
def _snake_case ( self ) -> str:
snake_case__ = self.dummy_cond_unet_upscale
snake_case__ = DDPMScheduler()
snake_case__ = DDIMScheduler(prediction_type='v_prediction' )
snake_case__ = self.dummy_vae
snake_case__ = self.dummy_text_encoder
snake_case__ = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
        image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0]
        low_res_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
# put models in fp16, except vae as it overflows in fp16
snake_case__ = unet.half()
snake_case__ = text_encoder.half()
# make sure here that pndm scheduler skips prk
snake_case__ = StableDiffusionUpscalePipeline(
unet=UpperCamelCase_ , low_res_scheduler=UpperCamelCase_ , scheduler=UpperCamelCase_ , vae=UpperCamelCase_ , text_encoder=UpperCamelCase_ , tokenizer=UpperCamelCase_ , max_noise_level=350 , )
snake_case__ = sd_pipe.to(UpperCamelCase_ )
sd_pipe.set_progress_bar_config(disable=UpperCamelCase_ )
snake_case__ = 'A painting of a squirrel eating a burger'
snake_case__ = torch.manual_seed(0 )
snake_case__ = sd_pipe(
[prompt] , image=UpperCamelCase_ , generator=UpperCamelCase_ , num_inference_steps=2 , output_type='np' , ).images
snake_case__ = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
@slow
@require_torch_gpu
class __snake_case ( unittest.TestCase ):
def _snake_case ( self ) -> List[str]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _snake_case ( self ) -> Optional[int]:
snake_case__ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/sd2-upscale/low_res_cat.png' )
snake_case__ = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale'
'/upsampled_cat.npy' )
snake_case__ = 'stabilityai/stable-diffusion-x4-upscaler'
snake_case__ = StableDiffusionUpscalePipeline.from_pretrained(UpperCamelCase_ )
pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
pipe.enable_attention_slicing()
snake_case__ = 'a cat sitting on a park bench'
snake_case__ = torch.manual_seed(0 )
snake_case__ = pipe(
prompt=UpperCamelCase_ , image=UpperCamelCase_ , generator=UpperCamelCase_ , output_type='np' , )
snake_case__ = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 1E-3
def _snake_case ( self ) -> List[Any]:
snake_case__ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/sd2-upscale/low_res_cat.png' )
snake_case__ = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale'
'/upsampled_cat_fp16.npy' )
snake_case__ = 'stabilityai/stable-diffusion-x4-upscaler'
snake_case__ = StableDiffusionUpscalePipeline.from_pretrained(
            UpperCamelCase_ , torch_dtype=torch.float16 , )
pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
pipe.enable_attention_slicing()
snake_case__ = 'a cat sitting on a park bench'
snake_case__ = torch.manual_seed(0 )
snake_case__ = pipe(
prompt=UpperCamelCase_ , image=UpperCamelCase_ , generator=UpperCamelCase_ , output_type='np' , )
snake_case__ = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 5E-1
def _snake_case ( self ) -> int:
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
snake_case__ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/sd2-upscale/low_res_cat.png' )
snake_case__ = 'stabilityai/stable-diffusion-x4-upscaler'
snake_case__ = StableDiffusionUpscalePipeline.from_pretrained(
            UpperCamelCase_ , torch_dtype=torch.float16 , )
pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
snake_case__ = 'a cat sitting on a park bench'
snake_case__ = torch.manual_seed(0 )
snake_case__ = pipe(
prompt=UpperCamelCase_ , image=UpperCamelCase_ , generator=UpperCamelCase_ , num_inference_steps=5 , output_type='np' , )
snake_case__ = torch.cuda.max_memory_allocated()
# make sure that less than 2.9 GB is allocated
assert mem_bytes < 2.9 * 10**9
| 368
| 0
|
test_graph_1 = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}

test_graph_2 = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}


def topology_sort(graph: dict[int, list[int]], vert: int, visited: list[bool]) -> list[int]:
    """Order vertices by DFS finish time (first pass of Kosaraju's algorithm)."""
    visited[vert] = True
    order = []
    for neighbour in graph[vert]:
        if not visited[neighbour]:
            order += topology_sort(graph, neighbour, visited)
    order.append(vert)
    return order


def find_components(reversed_graph: dict[int, list[int]], vert: int, visited: list[bool]) -> list[int]:
    """Collect every vertex reachable from `vert` in the reversed graph."""
    visited[vert] = True
    component = [vert]
    for neighbour in reversed_graph[vert]:
        if not visited[neighbour]:
            component += find_components(reversed_graph, neighbour, visited)
    return component


def kosaraju(graph: dict[int, list[int]]) -> list[list[int]]:
    """Return the strongly connected components of a directed graph."""
    visited = len(graph) * [False]
    reversed_graph: dict[int, list[int]] = {vert: [] for vert in range(len(graph))}
    for vert, neighbours in graph.items():
        for neighbour in neighbours:
            reversed_graph[neighbour].append(vert)

    order = []
    for i, was_visited in enumerate(visited):
        if not was_visited:
            order += topology_sort(graph, i, visited)

    components_list = []
    visited = len(graph) * [False]

    for i in range(len(graph)):
        vert = order[len(graph) - i - 1]
        if not visited[vert]:
            component = find_components(reversed_graph, vert, visited)
            components_list.append(component)

    return components_list
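
# Example run (illustrative addition) on the graphs defined at the top of this
# file: test_graph_1 decomposes into the components {0, 1, 2}, {3} and {4};
# test_graph_2 into {0, 1, 2} and {3, 4, 5}.
#
#     print(kosaraju(test_graph_1))
#     print(kosaraju(test_graph_2))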
| 479
|
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import datasets
import datasets.config
from .utils import require_beam
class DummyBeamDataset(datasets.BeamBasedBuilder):
def __a ( self : Tuple ) -> Tuple:
'''simple docstring'''
return datasets.DatasetInfo(
features=datasets.Features({'''content''': datasets.Value('''string''' )} ) , supervised_keys=__lowerCamelCase , )
def __a ( self : Union[str, Any] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Tuple ) -> int:
'''simple docstring'''
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''examples''': get_test_dummy_examples()} )]
def __a ( self : List[Any] , __lowerCamelCase : Any , __lowerCamelCase : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
import apache_beam as beam
return pipeline | "Load Examples" >> beam.Create(__lowerCamelCase )
class NestedBeamDataset(datasets.BeamBasedBuilder):
def __a ( self : int ) -> Any:
'''simple docstring'''
return datasets.DatasetInfo(
features=datasets.Features({'''a''': datasets.Sequence({'''b''': datasets.Value('''string''' )} )} ) , supervised_keys=__lowerCamelCase , )
def __a ( self : Optional[int] , __lowerCamelCase : List[Any] , __lowerCamelCase : Optional[int] ) -> int:
'''simple docstring'''
return [
datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''examples''': get_test_nested_examples()} )
]
def __a ( self : Union[str, Any] , __lowerCamelCase : Tuple , __lowerCamelCase : Union[str, Any] ) -> Tuple:
'''simple docstring'''
import apache_beam as beam
return pipeline | "Load Examples" >> beam.Create(__lowerCamelCase )
def __UpperCAmelCase ( )-> Dict:
"""simple docstring"""
return [(i, {"content": content}) for i, content in enumerate(['''foo''', '''bar''', '''foobar'''] )]
def __UpperCAmelCase ( )-> List[str]:
"""simple docstring"""
return [(i, {"a": {"b": [content]}}) for i, content in enumerate(['''foo''', '''bar''', '''foobar'''] )]
class BeamBuilderTest(TestCase):
@require_beam
def __a ( self : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
lowercase = len(get_test_dummy_examples() )
with tempfile.TemporaryDirectory() as tmp_cache_dir:
lowercase = DummyBeamDataset(cache_dir=__lowerCamelCase , beam_runner='''DirectRunner''' )
builder.download_and_prepare()
self.assertTrue(
os.path.exists(
os.path.join(__lowerCamelCase , builder.name , '''default''' , '''0.0.0''' , f'{builder.name}-train.arrow' ) ) )
self.assertDictEqual(builder.info.features , datasets.Features({'''content''': datasets.Value('''string''' )} ) )
lowercase = builder.as_dataset()
self.assertEqual(dset['''train'''].num_rows , __lowerCamelCase )
self.assertEqual(dset['''train'''].info.splits['''train'''].num_examples , __lowerCamelCase )
self.assertDictEqual(dset['''train'''][0] , get_test_dummy_examples()[0][1] )
self.assertDictEqual(
dset['''train'''][expected_num_examples - 1] , get_test_dummy_examples()[expected_num_examples - 1][1] )
self.assertTrue(
os.path.exists(os.path.join(__lowerCamelCase , builder.name , '''default''' , '''0.0.0''' , '''dataset_info.json''' ) ) )
del dset
@require_beam
def __a ( self : Union[str, Any] ) -> List[Any]:
'''simple docstring'''
import apache_beam as beam
lowercase = beam.io.parquetio.WriteToParquet
lowercase = len(get_test_dummy_examples() )
with tempfile.TemporaryDirectory() as tmp_cache_dir:
lowercase = DummyBeamDataset(cache_dir=__lowerCamelCase , beam_runner='''DirectRunner''' )
with patch('''apache_beam.io.parquetio.WriteToParquet''' ) as write_parquet_mock:
lowercase = partial(__lowerCamelCase , num_shards=2 )
builder.download_and_prepare()
self.assertTrue(
os.path.exists(
os.path.join(
__lowerCamelCase , builder.name , '''default''' , '''0.0.0''' , f'{builder.name}-train-00000-of-00002.arrow' ) ) )
self.assertTrue(
os.path.exists(
os.path.join(
__lowerCamelCase , builder.name , '''default''' , '''0.0.0''' , f'{builder.name}-train-00000-of-00002.arrow' ) ) )
self.assertDictEqual(builder.info.features , datasets.Features({'''content''': datasets.Value('''string''' )} ) )
lowercase = builder.as_dataset()
self.assertEqual(dset['''train'''].num_rows , __lowerCamelCase )
self.assertEqual(dset['''train'''].info.splits['''train'''].num_examples , __lowerCamelCase )
# Order is not preserved when sharding, so we just check that all the elements are there
self.assertListEqual(sorted(dset['''train''']['''content'''] ) , sorted(['''foo''', '''bar''', '''foobar'''] ) )
self.assertTrue(
os.path.exists(os.path.join(__lowerCamelCase , builder.name , '''default''' , '''0.0.0''' , '''dataset_info.json''' ) ) )
del dset
@require_beam
def __a ( self : int ) -> int:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_cache_dir:
lowercase = DummyBeamDataset(cache_dir=__lowerCamelCase )
self.assertRaises(datasets.builder.MissingBeamOptions , builder.download_and_prepare )
@require_beam
def __a ( self : str ) -> List[Any]:
'''simple docstring'''
lowercase = len(get_test_nested_examples() )
with tempfile.TemporaryDirectory() as tmp_cache_dir:
lowercase = NestedBeamDataset(cache_dir=__lowerCamelCase , beam_runner='''DirectRunner''' )
builder.download_and_prepare()
self.assertTrue(
os.path.exists(
os.path.join(__lowerCamelCase , builder.name , '''default''' , '''0.0.0''' , f'{builder.name}-train.arrow' ) ) )
self.assertDictEqual(
builder.info.features , datasets.Features({'''a''': datasets.Sequence({'''b''': datasets.Value('''string''' )} )} ) )
lowercase = builder.as_dataset()
self.assertEqual(dset['''train'''].num_rows , __lowerCamelCase )
self.assertEqual(dset['''train'''].info.splits['''train'''].num_examples , __lowerCamelCase )
self.assertDictEqual(dset['''train'''][0] , get_test_nested_examples()[0][1] )
self.assertDictEqual(
dset['''train'''][expected_num_examples - 1] , get_test_nested_examples()[expected_num_examples - 1][1] )
self.assertTrue(
os.path.exists(os.path.join(__lowerCamelCase , builder.name , '''default''' , '''0.0.0''' , '''dataset_info.json''' ) ) )
del dset
| 479
| 1
|
'''simple docstring'''
import re
import subprocess
import sys
fork_point_sha = subprocess.check_output("git merge-base main HEAD".split()).decode("utf-8")
modified_files = subprocess.check_output(f"git diff --name-only {fork_point_sha}".split()).decode("utf-8").split()

joined_dirs = "|".join(sys.argv[1:])
regex = re.compile(rf"^({joined_dirs}).*?\.py$")

relevant_modified_files = [x for x in modified_files if regex.match(x)]
print(" ".join(relevant_modified_files), end="")
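
# Example (illustrative; the script filename is an assumption):
#
#     python fetch_modified_py_files.py src tests
#
# prints the space-separated .py files under src/ or tests/ that changed since
# the merge-base with main.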
| 614
|
"""simple docstring"""
def binary_recursive(decimal: int) -> str:
    """Recursively build the binary digits of a non-negative integer."""
    decimal = int(decimal)
    if decimal in (0, 1):  # Exit cases for the recursion
        return str(decimal)
    div, mod = divmod(decimal, 2)
    return binary_recursive(div) + str(mod)


def main(number: str) -> str:
    """Convert an integer string (possibly negative) to a '0b'-prefixed binary string."""
    number = str(number).strip()
    if not number:
        raise ValueError("No input value was provided")
    negative = "-" if number.startswith("-") else ""
    number = number.lstrip("-")
    if not number.isnumeric():
        raise ValueError("Input value is not an integer")
    return f"{negative}0b{binary_recursive(int(number))}"
if __name__ == "__main__":
from doctest import testmod
testmod()
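
# Quick examples (added for illustration): 37 is 100101 in binary.
assert binary_recursive(37) == "100101"
assert main("-37") == "-0b100101"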
| 341
| 0
|
'''simple docstring'''
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

rename_keys_prefix = [
    ("bert.bert", "visual_bert"),
    ("bert.cls", "cls"),
    ("bert.classifier", "cls"),
    ("token_type_embeddings_visual", "visual_token_type_embeddings"),
    ("position_embeddings_visual", "visual_position_embeddings"),
    ("projection", "visual_projection"),
]

ACCEPTABLE_CHECKPOINTS = [
    "nlvr2_coco_pre_trained.th",
    "nlvr2_fine_tuned.th",
    "nlvr2_pre_trained.th",
    "vcr_coco_pre_train.th",
    "vcr_fine_tune.th",
    "vcr_pre_train.th",
    "vqa_coco_pre_trained.th",
    "vqa_fine_tuned.th",
    "vqa_pre_trained.th",
]


def load_state_dict(checkpoint_path):
    sd = torch.load(checkpoint_path, map_location="cpu")
    return sd


def get_new_dict(d, config, rename_keys_prefix=rename_keys_prefix):
    new_d = OrderedDict()
    new_d["visual_bert.embeddings.position_ids"] = torch.arange(config.max_position_embeddings).expand((1, -1))
    # detector_d = OrderedDict()
    for key in d:
        if "detector" in key:
            # detector_d[key.replace('detector.','')] = d[key]
            continue
        new_key = key
        for name_pair in rename_keys_prefix:
            new_key = new_key.replace(name_pair[0], name_pair[1])
        new_d[new_key] = d[key]
        if key == "bert.cls.predictions.decoder.weight":
            # Old bert code didn't have `decoder.bias`, but was added separately
            new_d["cls.predictions.decoder.bias"] = new_d["cls.predictions.bias"]
    return new_d


@torch.no_grad()
def convert_visual_bert_checkpoint(checkpoint_path, pytorch_dump_folder_path):
    """Copy/paste/tweak the original checkpoint's weights into our VisualBERT structure."""
    assert (
        checkpoint_path.split("/")[-1] in ACCEPTABLE_CHECKPOINTS
    ), f"The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}."

    # Get Config
    if "pre" in checkpoint_path:
        model_type = "pretraining"
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 512}
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
        elif "nlvr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 1024}
        else:
            raise NotImplementedError(f"No implementation found for `{checkpoint_path}`.")
    else:
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 512}
            model_type = "multichoice"
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
            model_type = "vqa_advanced"
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048, "num_labels": 3129}
            model_type = "vqa"
        elif "nlvr" in checkpoint_path:
            config_params = {
                "visual_embedding_dim": 1024,
                "num_labels": 2,
            }
            model_type = "nlvr"

    config = VisualBertConfig(**config_params)

    # Load State Dict
    state_dict = load_state_dict(checkpoint_path)
    new_state_dict = get_new_dict(state_dict, config)

    if model_type == "pretraining":
        model = VisualBertForPreTraining(config)
    elif model_type == "vqa":
        model = VisualBertForQuestionAnswering(config)
    elif model_type == "nlvr":
        model = VisualBertForVisualReasoning(config)
    elif model_type == "multichoice":
        model = VisualBertForMultipleChoice(config)

    model.load_state_dict(new_state_dict)
    # Save Checkpoints
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("orig_checkpoint_path", type=str, help="A path to .th on local filesystem.")
    parser.add_argument("pytorch_dump_folder_path", type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
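
# Example invocation (illustrative; the script filename is an assumption):
#
#     python convert_visual_bert_original_pytorch_checkpoint_to_pytorch.py \
#         vqa_fine_tuned.th ./visualbert-vqa
#
# The checkpoint filename must appear in ACCEPTABLE_CHECKPOINTS so that the
# head type and visual embedding size can be inferred from its name.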
| 245
|
'''simple docstring'''
DIGITS_FIFTH_POWER = {str(digit): digit**5 for digit in range(10)}


def digits_fifth_powers_sum(number: int) -> int:
    return sum(DIGITS_FIFTH_POWER[digit] for digit in str(number))


def solution() -> int:
    return sum(
        number
        for number in range(1000, 1_000_000)
        if number == digits_fifth_powers_sum(number)
    )
if __name__ == "__main__":
print(solution())
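
# Worked check (added for illustration): 4150 is one such number, since
# 4**5 + 1**5 + 5**5 + 0**5 == 1024 + 1 + 3125 + 0 == 4150.
assert digits_fifth_powers_sum(4150) == 4150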
| 245
| 1
|
import sys
import turtle
def get_mid(pa: tuple[float, float], pb: tuple[float, float]) -> tuple[float, float]:
    return (pa[0] + pb[0]) / 2, (pa[1] + pb[1]) / 2  # find midpoint of two points


def triangle(
    vertex1: tuple[float, float],
    vertex2: tuple[float, float],
    vertex3: tuple[float, float],
    depth: int,
) -> None:
    my_pen.up()
    my_pen.goto(vertex1[0], vertex1[1])
    my_pen.down()
    my_pen.goto(vertex2[0], vertex2[1])
    my_pen.goto(vertex3[0], vertex3[1])
    my_pen.goto(vertex1[0], vertex1[1])

    if depth == 0:
        return

    triangle(vertex1, get_mid(vertex1, vertex2), get_mid(vertex1, vertex3), depth - 1)
    triangle(vertex2, get_mid(vertex1, vertex2), get_mid(vertex2, vertex3), depth - 1)
    triangle(vertex3, get_mid(vertex3, vertex2), get_mid(vertex1, vertex3), depth - 1)
if __name__ == "__main__":
if len(sys.argv) != 2:
raise ValueError(
"""Correct format for using this script: """
"""python fractals.py <int:depth_for_fractal>"""
)
    my_pen = turtle.Turtle()
my_pen.ht()
my_pen.speed(5)
my_pen.pencolor("""red""")
    vertices = [(-175, -125), (0, 175), (175, -125)]  # vertices of triangle
triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
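
# Scale note (added for illustration): each recursion level outlines three
# half-size triangles, so depth d draws 1 + 3 + ... + 3**d = (3**(d + 1) - 1) // 2
# outlines in total (e.g. depth 5 -> 364).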
| 318
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/resnet-50": "https://huggingface.co/microsoft/resnet-50/blob/main/config.json",
}


class ResNetConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "resnet"
    layer_types = ["basic", "bottleneck"]

    def __init__(
        self,
        num_channels=3,
        embedding_size=64,
        hidden_sizes=[256, 512, 1024, 2048],
        depths=[3, 4, 6, 3],
        layer_type="bottleneck",
        hidden_act="relu",
        downsample_in_first_stage=False,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if layer_type not in self.layer_types:
            raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types)}")
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.downsample_in_first_stage = downsample_in_first_stage
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )


class ResNetOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-3
| 318
| 1
|
"""simple docstring"""
import builtins
import sys
from ...utils.imports import _is_package_available
from . import cursor, input
from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor
from .keymap import KEYMAP
in_colab = False
try:
    in_colab = _is_package_available("google.colab")
except ModuleNotFoundError:
    pass
@input.register
class UpperCamelCase :
def __init__(self : List[str] , _A : str = None , _A : list = []) -> Any:
__snake_case : int = 0
__snake_case : List[Any] = choices
__snake_case : List[str] = prompt
if sys.platform == "win32":
__snake_case : Union[str, Any] = '*'
else:
__snake_case : Optional[Any] = '➔ '
def _lowercase (self : Any , _A : Optional[Any] , _A : str = "") -> Optional[Any]:
if sys.platform != "win32":
writeColor(self.choices[index] , 32 , _A)
else:
forceWrite(self.choices[index] , _A)
def _lowercase (self : List[Any] , _A : int) -> Any:
if index == self.position:
forceWrite(f" {self.arrow_char} ")
self.write_choice(_A)
else:
forceWrite(f" {self.choices[index]}")
reset_cursor()
def _lowercase (self : Dict , _A : Direction , _A : int = 1) -> List[Any]:
__snake_case : List[str] = self.position
if direction == Direction.DOWN:
if self.position + 1 >= len(self.choices):
return
self.position += num_spaces
else:
if self.position - 1 < 0:
return
self.position -= num_spaces
clear_line()
self.print_choice(_A)
move_cursor(_A , direction.name)
self.print_choice(self.position)
@input.mark(KEYMAP['up'])
def _lowercase (self : Optional[int]) -> Tuple:
self.move_direction(Direction.UP)
@input.mark(KEYMAP['down'])
def _lowercase (self : Any) -> Optional[int]:
self.move_direction(Direction.DOWN)
@input.mark(KEYMAP['newline'])
def _lowercase (self : Union[str, Any]) -> int:
move_cursor(len(self.choices) - self.position , 'DOWN')
return self.position
@input.mark(KEYMAP['interrupt'])
def _lowercase (self : Any) -> List[Any]:
move_cursor(len(self.choices) - self.position , 'DOWN')
raise KeyboardInterrupt
    @input.mark_multiple(*[KEYMAP[str(number)] for number in range(10)])
def _lowercase (self : List[Any]) -> int:
__snake_case : Optional[Any] = int(chr(self.current_selection))
__snake_case : List[Any] = index - self.position
if index == self.position:
return
if index < len(self.choices):
if self.position > index:
self.move_direction(Direction.UP , -movement)
elif self.position < index:
self.move_direction(Direction.DOWN , _A)
else:
return
else:
return
def _lowercase (self : List[Any] , _A : int = 0) -> List[str]:
if self.prompt:
linebreak()
forceWrite(self.prompt , '\n')
if in_colab:
forceWrite('Please input a choice index (starting from 0), and press enter' , '\n')
else:
forceWrite('Please select a choice using the arrow or number keys, and selecting with enter' , '\n')
__snake_case : str = default_choice
for i in range(len(self.choices)):
self.print_choice(_A)
forceWrite('\n')
move_cursor(len(self.choices) - self.position , 'UP')
with cursor.hide():
while True:
if in_colab:
try:
__snake_case : Optional[Any] = int(builtins.input())
except ValueError:
__snake_case : str = default_choice
else:
__snake_case : Dict = self.handle_input()
if choice is not None:
reset_cursor()
for _ in range(len(self.choices) + 1):
move_cursor(1 , 'UP')
clear_line()
self.write_choice(_A , '\n')
return choice
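
# Usage sketch (illustrative): in accelerate this class is exposed as
# BulletMenu, and run() returns the index of the selected choice.
#
#     choice = BulletMenu("In which compute environment are you running?",
#                         ["This machine", "AWS (Amazon SageMaker)"]).run()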
| 192
|
"""simple docstring"""
import math
def jump_search(arr: list, x: int) -> int:
    """Search sorted `arr` for `x` by jumping sqrt(n)-sized blocks; return the index or -1."""
    n = len(arr)
    step = int(math.floor(math.sqrt(n)))
    prev = 0
    while arr[min(step, n) - 1] < x:
        prev = step
        step += int(math.floor(math.sqrt(n)))
        if prev >= n:
            return -1

    while arr[prev] < x:
        prev = prev + 1
        if prev == min(step, n):
            return -1
    if arr[prev] == x:
        return prev
    return -1


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    x = int(input("Enter the number to be searched:\n"))
    res = jump_search(arr, x)
    if res == -1:
        print("Number not found!")
    else:
        print(f"Number {x} is at index {res}")
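
# Quick example (added for illustration); the input array must already be sorted:
#
#     jump_search([1, 3, 5, 7, 9, 11], 7)  # -> 3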
| 192
| 1
|
"""simple docstring"""
from collections import defaultdict
from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst
def test_prim_successful_result() -> None:
    num_nodes, num_edges = 9, 14  # noqa: F841
    edges = [
        [0, 1, 4],
        [0, 7, 8],
        [1, 2, 8],
        [7, 8, 7],
        [7, 6, 1],
        [2, 8, 2],
        [8, 6, 6],
        [2, 3, 7],
        [2, 5, 4],
        [6, 5, 2],
        [3, 5, 14],
        [3, 4, 9],
        [5, 4, 10],
        [1, 7, 11],
    ]

    adjancency = defaultdict(list)
    for node1, node2, cost in edges:
        adjancency[node1].append([node2, cost])
        adjancency[node2].append([node1, cost])

    result = mst(adjancency)

    expected = [
        [7, 6, 1],
        [2, 8, 2],
        [6, 5, 2],
        [0, 1, 4],
        [2, 5, 4],
        [2, 3, 7],
        [0, 7, 8],
        [3, 4, 9],
    ]

    for answer in expected:
        edge = tuple(answer[:2])
        reverse = tuple(edge[::-1])
        assert edge in result or reverse in result
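
# Sanity note (added for illustration): a spanning tree over the 9 nodes above
# has exactly len(expected) == 9 - 1 == 8 edges, with total weight
# 1 + 2 + 2 + 4 + 4 + 7 + 8 + 9 = 37.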
| 438
|
"""simple docstring"""
import functools
import logging
import os
import sys
import threading
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
import huggingface_hub.utils as hf_hub_utils
from tqdm import auto as tqdm_lib
_lock = threading.Lock()
_default_handler: Optional[logging.Handler] = None

log_levels = {
    "debug": logging.DEBUG,
    "info": logging.INFO,
    "warning": logging.WARNING,
    "error": logging.ERROR,
    "critical": logging.CRITICAL,
}

_default_log_level = logging.WARNING

_tqdm_active = True
def SCREAMING_SNAKE_CASE__ ( )-> List[str]:
'''simple docstring'''
    env_level_str = os.getenv("TRANSFORMERS_VERBOSITY", None)
if env_level_str:
if env_level_str in log_levels:
return log_levels[env_level_str]
else:
logging.getLogger().warning(
f'Unknown option TRANSFORMERS_VERBOSITY={env_level_str}, '
f'has to be one of: { ", ".join(log_levels.keys() ) }' )
return _default_log_level
def SCREAMING_SNAKE_CASE__ ( )-> str:
'''simple docstring'''
return __name__.split("." )[0]
def SCREAMING_SNAKE_CASE__ ( )-> logging.Logger:
'''simple docstring'''
return logging.getLogger(_get_library_name() )
def SCREAMING_SNAKE_CASE__ ( )-> None:
'''simple docstring'''
global _default_handler
with _lock:
if _default_handler:
# This library has already configured the library root logger.
return
        _default_handler = logging.StreamHandler()  # Set sys.stderr as stream.
        _default_handler.flush = sys.stderr.flush

        # Apply our default configuration to the library root logger.
        library_root_logger = _get_library_root_logger()
        library_root_logger.addHandler(_default_handler)
        library_root_logger.setLevel(_get_default_logging_level())
        library_root_logger.propagate = False
def SCREAMING_SNAKE_CASE__ ( )-> None:
'''simple docstring'''
global _default_handler
with _lock:
if not _default_handler:
return
UpperCAmelCase__ : Union[str, Any] = _get_library_root_logger()
library_root_logger.removeHandler(_default_handler )
library_root_logger.setLevel(logging.NOTSET )
UpperCAmelCase__ : Optional[int] = None
def SCREAMING_SNAKE_CASE__ ( )-> Optional[int]:
'''simple docstring'''
return log_levels
def SCREAMING_SNAKE_CASE__ ( snake_case : Optional[str] = None )-> logging.Logger:
'''simple docstring'''
if name is None:
UpperCAmelCase__ : Tuple = _get_library_name()
_configure_library_root_logger()
return logging.getLogger(snake_case )
def SCREAMING_SNAKE_CASE__ ( )-> int:
'''simple docstring'''
_configure_library_root_logger()
return _get_library_root_logger().getEffectiveLevel()
def SCREAMING_SNAKE_CASE__ ( snake_case : int )-> None:
'''simple docstring'''
_configure_library_root_logger()
_get_library_root_logger().setLevel(snake_case )
def SCREAMING_SNAKE_CASE__ ( )-> Optional[Any]:
'''simple docstring'''
return set_verbosity(snake_case )
def SCREAMING_SNAKE_CASE__ ( )-> List[str]:
'''simple docstring'''
return set_verbosity(snake_case )
def SCREAMING_SNAKE_CASE__ ( )-> Tuple:
'''simple docstring'''
return set_verbosity(snake_case )
def SCREAMING_SNAKE_CASE__ ( )-> str:
'''simple docstring'''
return set_verbosity(snake_case )
def SCREAMING_SNAKE_CASE__ ( )-> None:
'''simple docstring'''
_configure_library_root_logger()
assert _default_handler is not None
_get_library_root_logger().removeHandler(_default_handler )
def SCREAMING_SNAKE_CASE__ ( )-> None:
'''simple docstring'''
_configure_library_root_logger()
assert _default_handler is not None
_get_library_root_logger().addHandler(_default_handler )
def SCREAMING_SNAKE_CASE__ ( snake_case : logging.Handler )-> None:
'''simple docstring'''
_configure_library_root_logger()
assert handler is not None
_get_library_root_logger().addHandler(snake_case )
def SCREAMING_SNAKE_CASE__ ( snake_case : logging.Handler )-> None:
'''simple docstring'''
_configure_library_root_logger()
assert handler is not None and handler not in _get_library_root_logger().handlers
_get_library_root_logger().removeHandler(snake_case )
def SCREAMING_SNAKE_CASE__ ( )-> None:
'''simple docstring'''
_configure_library_root_logger()
UpperCAmelCase__ : Dict = False
def SCREAMING_SNAKE_CASE__ ( )-> None:
'''simple docstring'''
_configure_library_root_logger()
UpperCAmelCase__ : List[Any] = True
def SCREAMING_SNAKE_CASE__ ( )-> None:
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = _get_library_root_logger().handlers
for handler in handlers:
UpperCAmelCase__ : Union[str, Any] = logging.Formatter("[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s >> %(message)s" )
handler.setFormatter(snake_case )
def SCREAMING_SNAKE_CASE__ ( )-> None:
'''simple docstring'''
UpperCAmelCase__ : Any = _get_library_root_logger().handlers
for handler in handlers:
handler.setFormatter(snake_case )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , *snake_case : List[str] , **snake_case : str )-> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = os.getenv("TRANSFORMERS_NO_ADVISORY_WARNINGS" , snake_case )
if no_advisory_warnings:
return
self.warning(*snake_case , **snake_case )
_lowerCAmelCase : int = warning_advice
@functools.lru_cache(snake_case )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , *snake_case : Dict , **snake_case : Any )-> Any:
'''simple docstring'''
self.warning(*snake_case , **snake_case )
_lowerCAmelCase : Tuple = warning_once
class lowerCAmelCase__ :
def __init__( self : List[str] , *snake_case__ : Any , **snake_case__ : List[str] ): # pylint: disable=unused-argument
'''simple docstring'''
UpperCAmelCase__ : List[Any] = args[0] if args else None
def __iter__( self : Any ):
'''simple docstring'''
return iter(self._iterator )
def __getattr__( self : List[Any] , snake_case__ : Tuple ):
'''simple docstring'''
def empty_fn(*snake_case__ : Dict , **snake_case__ : str ): # pylint: disable=unused-argument
return
return empty_fn
def __enter__( self : Optional[Any] ):
'''simple docstring'''
return self
def __exit__( self : Tuple , snake_case__ : Tuple , snake_case__ : Any , snake_case__ : Optional[int] ):
'''simple docstring'''
return
class lowerCAmelCase__ :
def __call__( self : Optional[Any] , *snake_case__ : Optional[Any] , **snake_case__ : Tuple ):
'''simple docstring'''
if _tqdm_active:
return tqdm_lib.tqdm(*snake_case__ , **snake_case__ )
else:
return EmptyTqdm(*snake_case__ , **snake_case__ )
def __a ( self : Dict , *snake_case__ : List[str] , **snake_case__ : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase__ : Dict = None
if _tqdm_active:
return tqdm_lib.tqdm.set_lock(*snake_case__ , **snake_case__ )
def __a ( self : Tuple ):
'''simple docstring'''
if _tqdm_active:
return tqdm_lib.tqdm.get_lock()
_lowerCAmelCase : Optional[int] = _tqdm_cls()
def SCREAMING_SNAKE_CASE__ ( )-> bool:
'''simple docstring'''
global _tqdm_active
return bool(_tqdm_active )
def SCREAMING_SNAKE_CASE__ ( )-> List[Any]:
'''simple docstring'''
global _tqdm_active
UpperCAmelCase__ : int = True
hf_hub_utils.enable_progress_bars()
def SCREAMING_SNAKE_CASE__ ( )-> List[Any]:
'''simple docstring'''
global _tqdm_active
UpperCAmelCase__ : Optional[Any] = False
hf_hub_utils.disable_progress_bars()
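# Illustrative usage sketch (editor's addition, not part of the original module):
# shows how the helpers above compose when the module is executed directly.
if __name__ == "__main__":
    set_verbosity(logging.INFO)          # root library logger now emits INFO
    demo_logger = get_logger(__name__)   # child loggers inherit that level
    demo_logger.info("verbosity is %s", get_verbosity())
    for _ in tqdm(range(3)):             # honors enable/disable_progress_bars()
        pass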
| 438
| 1
|
"""Solve the graph-coloring problem with backtracking."""


def valid_coloring(neighbours: list[int], colored_vertices: list[int], color: int) -> bool:
    """Return True if no already-colored neighbour uses the given color."""
    return not any(
        neighbour == 1 and colored_vertices[i] == color
        for i, neighbour in enumerate(neighbours)
    )


def util_color(graph: list[list[int]], max_colors: int, colored_vertices: list[int], index: int) -> bool:
    """Recursively try to color vertex `index` and every vertex after it."""
    # Base Case
    if index == len(graph):
        return True

    # Recursive Step
    for i in range(max_colors):
        if valid_coloring(graph[index], colored_vertices, i):
            # Color current vertex
            colored_vertices[index] = i
            # Validate coloring
            if util_color(graph, max_colors, colored_vertices, index + 1):
                return True
            # Backtrack
            colored_vertices[index] = -1
    return False


def color(graph: list[list[int]], max_colors: int) -> list[int]:
    """Return a valid coloring with at most `max_colors` colors, or [] if none exists."""
    colored_vertices = [-1] * len(graph)

    if util_color(graph, max_colors, colored_vertices, 0):
        return colored_vertices

    return []
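# Illustrative usage (editor's addition): color a 5-vertex graph, given as an
# adjacency matrix, with at most 3 colors; tracing the backtracking order above
# gives [0, 1, 0, 1, 0].
if __name__ == "__main__":
    adjacency_matrix = [
        [0, 1, 0, 0, 0],
        [1, 0, 1, 0, 1],
        [0, 1, 0, 1, 0],
        [0, 0, 1, 0, 1],
        [0, 1, 0, 1, 0],
    ]
    print(color(adjacency_matrix, 3))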
| 717
|
"""Prim's minimum spanning tree algorithm, in linear-scan and heap variants."""
import heapq as hq
import math
from collections.abc import Iterator


class Vertex:
    """A graph vertex with a key, a parent pointer and weighted edges."""

    def __init__(self, id_):
        self.id = str(id_)
        self.key = None
        self.pi = None
        self.neighbors = []
        self.edges = {}  # {vertex:distance}

    def __lt__(self, other):
        return self.key < other.key

    def __repr__(self):
        return self.id

    def add_neighbor(self, vertex):
        """Add a vertex to the neighbors list."""
        self.neighbors.append(vertex)

    def add_edge(self, vertex, weight):
        """Record the weight of the edge towards `vertex`."""
        self.edges[vertex.id] = weight


def connect(graph, a, b, edge):
    """Connect vertices `a` and `b` (1-indexed) with an edge of the given weight."""
    # add the neighbors:
    graph[a - 1].add_neighbor(graph[b - 1])
    graph[b - 1].add_neighbor(graph[a - 1])
    # add the edges:
    graph[a - 1].add_edge(graph[b - 1], edge)
    graph[b - 1].add_edge(graph[a - 1], edge)


def prim(graph: list, root: Vertex) -> list:
    """Run Prim's algorithm with a linear scan; return the MST as (child, parent) pairs."""
    a = []
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0
    q = graph[:]
    while q:
        u = min(q)
        q.remove(u)
        for v in u.neighbors:
            if (v in q) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
    for i in range(1, len(graph)):
        a.append((int(graph[i].id) + 1, int(graph[i].pi.id) + 1))
    return a


def prim_heap(graph: list, root: Vertex) -> Iterator[tuple]:
    """Run Prim's algorithm with a binary heap; yield the MST as (child, parent) pairs."""
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0

    h = list(graph)
    hq.heapify(h)

    while h:
        u = hq.heappop(h)
        for v in u.neighbors:
            if (v in h) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
                hq.heapify(h)

    for i in range(1, len(graph)):
        yield (int(graph[i].id) + 1, int(graph[i].pi.id) + 1)
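# Illustrative usage (editor's addition): a 3-vertex triangle with edge weights
# 1 (1-2), 2 (2-3) and 3 (1-3); both variants keep the two cheapest edges.
def _demo_prim() -> None:
    graph = [Vertex(i) for i in range(3)]
    connect(graph, 1, 2, 1)
    connect(graph, 2, 3, 2)
    connect(graph, 1, 3, 3)
    assert prim(graph, graph[0]) == [(2, 1), (3, 2)]
    assert list(prim_heap(graph, graph[0])) == [(2, 1), (3, 2)]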
def test_vector() -> None:
    """Doctest stub; the original doctest examples were elided from this snippet."""


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 163
| 0
|
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class Edge:
    """Weighted directed edge used by the 0-1 BFS below."""

    destination_vertex: int
    weight: int


class AdjacencyList:
    """Graph adjacency list supporting 0-1 BFS shortest paths."""

    def __init__(self, size: int):
        self._graph: list[list[Edge]] = [[] for _ in range(size)]
        self._size = size

    def __getitem__(self, vertex: int) -> Iterator[Edge]:
        return iter(self._graph[vertex])

    @property
    def size(self) -> int:
        return self._size

    def add_edge(self, from_vertex: int, to_vertex: int, weight: int) -> None:
        """Add an edge of weight 0 or 1 from `from_vertex` to `to_vertex`."""
        if weight not in (0, 1):
            raise ValueError("Edge weight must be either 0 or 1.")
        if to_vertex < 0 or to_vertex >= self.size:
            raise ValueError("Vertex indexes must be in [0; size).")
        self._graph[from_vertex].append(Edge(to_vertex, weight))

    def get_shortest_path(self, start_vertex: int, finish_vertex: int) -> int:
        """Return the 0-1 BFS shortest distance from start_vertex to finish_vertex."""
        queue = deque([start_vertex])
        distances: list[int | None] = [None] * self.size
        distances[start_vertex] = 0

        while queue:
            current_vertex = queue.popleft()
            current_distance = distances[current_vertex]
            if current_distance is None:
                continue

            for edge in self[current_vertex]:
                new_distance = current_distance + edge.weight
                dest_vertex_distance = distances[edge.destination_vertex]
                if (
                    isinstance(dest_vertex_distance, int)
                    and new_distance >= dest_vertex_distance
                ):
                    continue
                # Found a shorter path: relax, then push with deque discipline
                # (zero-weight edges go to the front, unit-weight edges to the back).
                distances[edge.destination_vertex] = new_distance
                if edge.weight == 0:
                    queue.appendleft(edge.destination_vertex)
                else:
                    queue.append(edge.destination_vertex)

        if distances[finish_vertex] is None:
            raise ValueError("No path from start_vertex to finish_vertex.")

        return distances[finish_vertex]
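# Illustrative usage (editor's addition): a 4-vertex graph where the zero-weight
# detour 0 -> 2 -> 3 beats the direct weight-1 edge 0 -> 3.
def _demo_zero_one_bfs() -> int:
    g = AdjacencyList(4)
    g.add_edge(0, 1, 1)
    g.add_edge(0, 2, 0)
    g.add_edge(2, 3, 0)
    g.add_edge(0, 3, 1)
    return g.get_shortest_path(0, 3)  # returns 0, not 1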
if __name__ == "__main__":
import doctest
doctest.testmod()
| 101
|
'''simple docstring'''
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
logger = logging.get_logger(__name__)

CODEGEN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''Salesforce/codegen-350M-nl''': '''https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json''',
'''Salesforce/codegen-350M-multi''': '''https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json''',
'''Salesforce/codegen-350M-mono''': '''https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json''',
'''Salesforce/codegen-2B-nl''': '''https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json''',
'''Salesforce/codegen-2B-multi''': '''https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json''',
'''Salesforce/codegen-2B-mono''': '''https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json''',
'''Salesforce/codegen-6B-nl''': '''https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json''',
'''Salesforce/codegen-6B-multi''': '''https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json''',
'''Salesforce/codegen-6B-mono''': '''https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json''',
'''Salesforce/codegen-16B-nl''': '''https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json''',
'''Salesforce/codegen-16B-multi''': '''https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json''',
'''Salesforce/codegen-16B-mono''': '''https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json''',
}
class CodeGenConfig(PretrainedConfig):
    model_type = "codegen"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=50400,
        n_positions=2048,
        n_ctx=2048,
        n_embd=4096,
        n_layer=28,
        n_head=16,
        rotary_dim=64,
        n_inner=None,
        activation_function="gelu_new",
        resid_pdrop=0.0,
        embd_pdrop=0.0,
        attn_pdrop=0.0,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        tie_word_embeddings=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_ctx = n_ctx
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs
        )


class CodeGenOnnxConfig(OnnxConfigWithPast):
    def __init__(
        self,
        config: PretrainedConfig,
        task: str = "default",
        patching_specs: List[PatchingSpec] = None,
        use_past: bool = False,
    ):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}

        return common_inputs

    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
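# Illustrative usage (editor's addition): pair a tiny, hypothetical config with its
# ONNX description; `default_onnx_opset` above pins opset 13 for export.
if __name__ == "__main__":
    cfg = CodeGenConfig(n_layer=2, n_head=4, n_embd=64)
    onnx_cfg = CodeGenOnnxConfig(cfg, task="default")
    print(onnx_cfg.num_layers, onnx_cfg.num_attention_heads)  # -> 2 4
    print(dict(onnx_cfg.inputs))  # axis names for input_ids / attention_mask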
| 288
| 0
|
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'huggingface/informer-tourism-monthly': (
'https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json'
),
# See all Informer models at https://huggingface.co/models?filter=informer
}
class InformerConfig(PretrainedConfig):
    model_type = "informer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }

    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = None,
        scaling: Optional[Union[str, bool]] = "mean",
        num_dynamic_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_static_real_features: int = 0,
        num_time_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        d_model: int = 64,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        is_encoder_decoder: bool = True,
        activation_function: str = "gelu",
        dropout: float = 0.05,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache=True,
        # Informer arguments
        attention_type: str = "prob",
        sampling_factor: int = 5,
        distil: bool = True,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features

        # set cardinality
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]

        # set embedding_dimension
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]

        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers

        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop

        self.activation_function = activation_function
        self.init_std = init_std

        self.use_cache = use_cache

        # Informer
        self.attention_type = attention_type
        self.sampling_factor = sampling_factor
        self.distil = distil

        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
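# Worked example (editor's addition): with the defaults, num_static_categorical_features
# is 0, so cardinality = [0] and embedding_dimension = [min(50, (0 + 1) // 2)] = [0].
# _number_of_features is then 0 + 0 + 0 + 0 + 1 * 2 = 2, and with the seven default
# lags feature_size = 1 * 7 + 2 = 9.
if __name__ == "__main__":
    assert InformerConfig(prediction_length=24).feature_size == 9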
| 708
|
import sys
import turtle
def get_mid(p1: tuple[float, float], p2: tuple[float, float]) -> tuple[float, float]:
    """Return the midpoint of two points."""
    return (p1[0] + p2[0]) / 2, (p1[1] + p2[1]) / 2


def triangle(
    vertex1: tuple[float, float],
    vertex2: tuple[float, float],
    vertex3: tuple[float, float],
    depth: int,
) -> None:
    """Recursively draw the Sierpinski triangle down to the given depth."""
    my_pen.up()
    my_pen.goto(vertex1[0], vertex1[1])
    my_pen.down()
    my_pen.goto(vertex2[0], vertex2[1])
    my_pen.goto(vertex3[0], vertex3[1])
    my_pen.goto(vertex1[0], vertex1[1])

    if depth == 0:
        return

    triangle(vertex1, get_mid(vertex1, vertex2), get_mid(vertex1, vertex3), depth - 1)
    triangle(vertex2, get_mid(vertex1, vertex2), get_mid(vertex2, vertex3), depth - 1)
    triangle(vertex3, get_mid(vertex3, vertex2), get_mid(vertex1, vertex3), depth - 1)


if __name__ == "__main__":
    if len(sys.argv) != 2:
        raise ValueError(
            "Correct format for using this script: "
            "python fractals.py <int:depth_for_fractal>"
        )
    my_pen = turtle.Turtle()
    my_pen.ht()
    my_pen.speed(5)
    my_pen.pencolor("red")

    vertices = [(-175, -125), (0, 175), (175, -125)]  # vertices of triangle
    triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
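# Worked example (editor's addition): get_mid is plain midpoint arithmetic, so each
# recursion level halves the side length of the drawn triangles.
assert get_mid((0, 0), (4, 2)) == (2.0, 1.0)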
| 240
| 0
|
'''simple docstring'''
import unittest
from transformers import JukeboxTokenizer
from transformers.testing_utils import require_torch
class JukeboxTokenizationTest(unittest.TestCase):
    """Integration checks for the token ids produced by JukeboxTokenizer."""

    tokenizer_class = JukeboxTokenizer
    metas = {
'artist': 'Zac Brown Band',
'genres': 'Country',
'lyrics': 'I met a traveller from an antique land,\n Who said "Two vast and trunkless legs of stone\n Stand in the desert. . . . Near them, on the sand,\n Half sunk a shattered visage lies, whose frown,\n And wrinkled lip, and sneer of cold command,\n Tell that its sculptor well those passions read\n Which yet survive, stamped on these lifeless things,\n The hand that mocked them, and the heart that fed;\n And on the pedestal, these words appear:\n My name is Ozymandias, King of Kings;\n Look on my Works, ye Mighty, and despair!\n Nothing beside remains. Round the decay\n Of that colossal Wreck, boundless and bare\n The lone and level sands stretch far away\n ',
}
@require_torch
    def test_1b_lyrics_tokenizer(self):
        import torch

        tokenizer = JukeboxTokenizer.from_pretrained('openai/jukebox-1b-lyrics')
        tokens = tokenizer(**self.metas)['input_ids']
        # fmt: off
        EXPECTED_OUTPUT = [
torch.tensor([[
0, 0, 0, 7169, 507, 9, 76, 39, 31, 46, 76, 27,
76, 46, 44, 27, 48, 31, 38, 38, 31, 44, 76, 32,
44, 41, 39, 76, 27, 40, 76, 27, 40, 46, 35, 43,
47, 31, 76, 38, 27, 40, 30, 64, 78, 76, 76, 76,
76, 76, 76, 76, 76, 23, 34, 41, 76, 45, 27, 35,
30, 76, 71, 20, 49, 41, 76, 48, 27, 45, 46, 76,
27, 40, 30, 76, 46, 44, 47, 40, 37, 38, 31, 45,
45, 76, 38, 31, 33, 45, 76, 41, 32, 76, 45, 46,
41, 40, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
19, 46, 27, 40, 30, 76, 35, 40, 76, 46, 34, 31,
76, 30, 31, 45, 31, 44, 46, 63, 76, 63, 76, 63,
76, 63, 76, 14, 31, 27, 44, 76, 46, 34, 31, 39,
64, 76, 41, 40, 76, 46, 34, 31, 76, 45, 27, 40,
30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 8,
27, 38, 32, 76, 45, 47, 40, 37, 76, 27, 76, 45,
34, 27, 46, 46, 31, 44, 31, 30, 76, 48, 35, 45,
27, 33, 31, 76, 38, 35, 31, 45, 64, 76, 49, 34,
41, 45, 31, 76, 32, 44, 41, 49, 40, 64, 78, 76,
76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 49,
44, 35, 40, 37, 38, 31, 30, 76, 38, 35, 42, 64,
76, 27, 40, 30, 76, 45, 40, 31, 31, 44, 76, 41,
32, 76, 29, 41, 38, 30, 76, 29, 41, 39, 39, 27,
40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 31, 38, 38, 76, 46, 34, 27, 46, 76, 35, 46,
45, 76, 45, 29, 47, 38, 42, 46, 41, 44, 76, 49,
31, 38, 38, 76, 46, 34, 41, 45, 31, 76, 42, 27,
45, 45, 35, 41, 40, 45, 76, 44, 31, 27, 30, 78,
76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 35, 29,
34, 76, 51, 31, 46, 76, 45, 47, 44, 48, 35, 48,
31, 64, 76, 45, 46, 27, 39, 42, 31, 30, 76, 41,
40, 76, 46, 34, 31, 45, 31, 76, 38, 35, 32, 31,
38, 31, 45, 45, 76, 46, 34, 35, 40, 33, 45, 64,
78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31,
76, 34, 27, 40, 30, 76, 46, 34, 27, 46, 76, 39,
41, 29, 37, 31, 30, 76, 46, 34, 31, 39, 64, 76,
27, 40, 30, 76, 46, 34, 31, 76, 34, 31, 27, 44,
46, 76, 46, 34, 27, 46, 76, 32, 31, 30, 66, 78,
76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76,
41, 40, 76, 46, 34, 31, 76, 42, 31, 30, 31, 45,
46, 27, 38, 64, 76, 46, 34, 31, 45, 31, 76, 49,
41, 44, 30, 45, 76, 27, 42, 42, 31, 27, 44, 65,
78, 76, 76, 76, 76, 76, 76, 76, 76, 13, 51, 76,
40, 27, 39, 31, 76, 35, 45, 76, 15, 52, 51, 39,
27, 40, 30, 35, 27, 45, 64, 76, 11, 35, 40, 33,
76, 41, 32, 76, 11, 35, 40, 33, 45, 66, 78, 76,
76, 76, 76, 76, 76, 76, 76, 12, 41, 41, 37, 76,
41, 40, 76, 39, 51, 76, 23, 41, 44, 37, 45, 64,
76, 51, 31, 76, 13, 35, 33, 34, 46, 51, 64, 76,
27, 40, 30, 76, 30, 31, 45, 42, 27, 35, 44, 67,
78, 76, 76, 76, 76, 76, 76, 76, 76, 14, 41, 46,
34, 35, 40, 33, 76, 28, 31, 45, 35, 30, 31, 76,
44, 31, 39, 27, 35, 40, 45, 63, 76, 18, 41, 47,
40, 30, 76, 46, 34, 31, 76, 30, 31, 29, 27, 51,
78, 76, 76, 76, 76, 76, 76, 76, 76, 15, 32, 76,
46, 34, 27, 46, 76, 29, 41, 38, 41, 45, 45, 27,
38, 76, 23, 44, 31, 29, 37, 64, 76, 28, 41, 47,
40, 30, 38, 31, 45, 45, 76, 27, 40, 30, 76, 28,
27, 44, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 34, 31, 76, 38, 41, 40, 31, 76, 27, 40, 30,
76, 38, 31, 48, 31, 38, 76, 45, 27, 40, 30, 45,
76, 45, 46, 44, 31, 46, 29, 34, 76, 32, 27, 44,
76, 27, 49, 27, 51, 78, 76, 76, 76, 76, 76, 76,
76, 76]] ),
torch.tensor([[0, 0, 0, 1069, 11]] ),
torch.tensor([[0, 0, 0, 1069, 11]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
@require_torch
    def test_5b_lyrics_tokenizer(self):
        import torch

        tokenizer = JukeboxTokenizer.from_pretrained('openai/jukebox-5b-lyrics')
        tokens = tokenizer(**self.metas)['input_ids']
        # fmt: off
        EXPECTED_OUTPUT = [
torch.tensor([[
0, 0, 0, 1069, 11, -1, -1, -1, -1, 9, 77, 39,
31, 46, 77, 27, 77, 46, 44, 27, 48, 31, 38, 38,
31, 44, 77, 32, 44, 41, 39, 77, 27, 40, 77, 27,
40, 46, 35, 43, 47, 31, 77, 38, 27, 40, 30, 64,
79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 41,
77, 45, 27, 35, 30, 77, 72, 20, 49, 41, 77, 48,
27, 45, 46, 77, 27, 40, 30, 77, 46, 44, 47, 40,
37, 38, 31, 45, 45, 77, 38, 31, 33, 45, 77, 41,
32, 77, 45, 46, 41, 40, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 19, 46, 27, 40, 30, 77, 35, 40,
77, 46, 34, 31, 77, 30, 31, 45, 31, 44, 46, 63,
77, 63, 77, 63, 77, 63, 77, 14, 31, 27, 44, 77,
46, 34, 31, 39, 64, 77, 41, 40, 77, 46, 34, 31,
77, 45, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77,
77, 77, 77, 8, 27, 38, 32, 77, 45, 47, 40, 37,
77, 27, 77, 45, 34, 27, 46, 46, 31, 44, 31, 30,
77, 48, 35, 45, 27, 33, 31, 77, 38, 35, 31, 45,
64, 77, 49, 34, 41, 45, 31, 77, 32, 44, 41, 49,
40, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1,
40, 30, 77, 49, 44, 35, 40, 37, 38, 31, 30, 77,
38, 35, 42, 64, 77, 27, 40, 30, 77, 45, 40, 31,
31, 44, 77, 41, 32, 77, 29, 41, 38, 30, 77, 29,
41, 39, 39, 27, 40, 30, 64, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 31, 38, 38, 77, 46, 34, 27,
46, 77, 35, 46, 45, 77, 45, 29, 47, 38, 42, 46,
41, 44, 77, 49, 31, 38, 38, 77, 46, 34, 41, 45,
31, 77, 42, 27, 45, 45, 35, 41, 40, 45, 77, 44,
31, 27, 30, 79, 77, 77, 77, 77, 77, 77, 77, 77,
23, 34, 35, 29, 34, 77, 51, 31, 46, 77, 45, 47,
44, 48, 35, 48, 31, 64, 77, 45, 46, 27, 39, 42,
31, 30, 77, 41, 40, 77, 46, 34, 31, 45, 31, 77,
38, 35, 32, 31, 38, 31, 45, 45, 77, 46, 34, 35,
40, 33, 45, 64, 79, 77, 77, 77, 77, 77, 77, 77,
77, 20, 34, 31, 77, 34, 27, 40, 30, 77, 46, 34,
27, 46, 77, 39, 41, 29, 37, 31, 30, 77, 46, 34,
31, 39, 64, 77, 27, 40, 30, 77, 46, 34, 31, 77,
34, 31, 27, 44, 46, 77, 46, 34, 27, 46, 77, 32,
31, 30, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77,
1, 40, 30, 77, 41, 40, 77, 46, 34, 31, 77, 42,
31, 30, 31, 45, 46, 27, 38, 64, 77, 46, 34, 31,
45, 31, 77, 49, 41, 44, 30, 45, 77, 27, 42, 42,
31, 27, 44, 65, 79, 77, 77, 77, 77, 77, 77, 77,
77, 13, 51, 77, 40, 27, 39, 31, 77, 35, 45, 77,
15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 77,
11, 35, 40, 33, 77, 41, 32, 77, 11, 35, 40, 33,
45, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 12,
41, 41, 37, 77, 41, 40, 77, 39, 51, 77, 23, 41,
44, 37, 45, 64, 77, 51, 31, 77, 13, 35, 33, 34,
46, 51, 64, 77, 27, 40, 30, 77, 30, 31, 45, 42,
27, 35, 44, 67, 79, 77, 77, 77, 77, 77, 77, 77,
77, 14, 41, 46, 34, 35, 40, 33, 77, 28, 31, 45,
35, 30, 31, 77, 44, 31, 39, 27, 35, 40, 45, 63,
77, 18, 41, 47, 40, 30, 77, 46, 34, 31, 77, 30,
31, 29, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77,
77, 15, 32, 77, 46, 34, 27, 46, 77, 29, 41, 38,
41, 45, 45, 27, 38, 77, 23, 44, 31, 29, 37, 64,
77, 28, 41, 47, 40, 30, 38, 31, 45, 45, 77, 27,
40, 30, 77, 28, 27, 44, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 34, 31, 77, 38, 41, 40, 31,
77, 27, 40, 30, 77, 38, 31, 48, 31, 38, 77, 45,
27, 40, 30, 45, 77, 45, 46, 44, 31, 46, 29, 34,
77, 32, 27, 44, 77, 27, 49, 27, 51, 79, 77, 77,
77, 77, 77, 77, 77, 77]] ),
torch.tensor([[0, 0, 0, 1069, 11, -1, -1, -1, -1]] ),
torch.tensor([[0, 0, 0, 1069, 11, -1, -1, -1, -1]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
| 597
|
'''simple docstring'''
import math
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"facebook/data2vec-base-960h": "https://huggingface.co/facebook/data2vec-audio-base-960h/resolve/main/config.json",
# See all Data2VecAudio models at https://huggingface.co/models?filter=data2vec-audio
}
class Data2VecAudioConfig(PretrainedConfig):
    model_type = "data2vec-audio"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embedding_groups=16,
        conv_pos_kernel_size=19,
        num_conv_pos_embeddings=5,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        ctc_loss_reduction="sum",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1500),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        add_adapter=False,
        adapter_kernel_size=3,
        adapter_stride=2,
        num_adapter_layers=3,
        output_hidden_size=None,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.conv_pos_kernel_size = conv_pos_kernel_size
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.use_weighted_layer_sum = use_weighted_layer_sum

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def inputs_to_logits_ratio(self):
        return math.prod(self.conv_stride)
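# Worked example (editor's addition): the default strides (5, 2, 2, 2, 2, 2, 2) give a
# total downsampling of 5 * 2**6 = 320, i.e. one logit frame per 20 ms of 16 kHz audio.
if __name__ == "__main__":
    assert Data2VecAudioConfig().inputs_to_logits_ratio == 320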
| 597
| 1
|
from random import randint
from tempfile import TemporaryFile
import numpy as np
def _in_place_quick_sort(a, start, end):
    """Randomized in-place quicksort; returns the number of comparisons made."""
    count = 0
    if start < end:
        pivot = randint(start, end)
        temp = a[end]
        a[end] = a[pivot]
        a[pivot] = temp

        p, count = _in_place_partition(a, start, end)
        count += _in_place_quick_sort(a, start, p - 1)
        count += _in_place_quick_sort(a, p + 1, end)
    return count


def _in_place_partition(a, start, end):
    """Partition a[start:end + 1] around a random pivot; returns (pivot index, comparisons)."""
    count = 0
    pivot = randint(start, end)
    temp = a[end]
    a[end] = a[pivot]
    a[pivot] = temp

    new_pivot_index = start - 1
    for index in range(start, end):
        count += 1
        if a[index] < a[end]:  # check if current val is less than pivot value
            new_pivot_index = new_pivot_index + 1
            temp = a[new_pivot_index]
            a[new_pivot_index] = a[index]
            a[index] = temp

    temp = a[new_pivot_index + 1]
    a[new_pivot_index + 1] = a[end]
    a[end] = temp
    return new_pivot_index + 1, count


outfile = TemporaryFile()
p = 100  # 100 elements are to be sorted

mu, sigma = 0, 1  # mean and standard deviation
X = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print("The array is")
print(X)

outfile.seek(0)  # using the same array
M = np.load(outfile)
r = len(M) - 1
z = _in_place_quick_sort(M, 0, r)

print(
    "No of Comparisons for 100 elements selected from a standard normal distribution "
    "is :"
)
print(z)
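# Sanity check (editor's addition): after the in-place sort, M must be non-decreasing.
assert all(M[i] <= M[i + 1] for i in range(len(M) - 1))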
| 716
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTConfig,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_mobilevit_config(mobilevit_name):
    config = MobileViTConfig()

    # size of the architecture
    if "mobilevit_s" in mobilevit_name:
        config.hidden_sizes = [144, 192, 240]
        config.neck_hidden_sizes = [16, 32, 64, 96, 128, 160, 640]
    elif "mobilevit_xs" in mobilevit_name:
        config.hidden_sizes = [96, 120, 144]
        config.neck_hidden_sizes = [16, 32, 48, 64, 80, 96, 384]
    elif "mobilevit_xxs" in mobilevit_name:
        config.hidden_sizes = [64, 80, 96]
        config.neck_hidden_sizes = [16, 16, 24, 48, 64, 80, 320]
        config.hidden_dropout_prob = 0.05
        config.expand_ratio = 2.0

    if mobilevit_name.startswith("deeplabv3_"):
        config.image_size = 512
        config.output_stride = 16
        config.num_labels = 21
        filename = "pascal-voc-id2label.json"
    else:
        config.num_labels = 1000
        filename = "imagenet-1k-id2label.json"

    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def rename_key(name, base_model=False):
    for i in range(1, 6):
        if f"layer_{i}." in name:
            name = name.replace(f"layer_{i}.", f"encoder.layer.{i - 1}.")

    if "conv_1." in name:
        name = name.replace("conv_1.", "conv_stem.")
    if ".block." in name:
        name = name.replace(".block.", ".")

    if "exp_1x1" in name:
        name = name.replace("exp_1x1", "expand_1x1")
    if "red_1x1" in name:
        name = name.replace("red_1x1", "reduce_1x1")
    if ".local_rep.conv_3x3." in name:
        name = name.replace(".local_rep.conv_3x3.", ".conv_kxk.")
    if ".local_rep.conv_1x1." in name:
        name = name.replace(".local_rep.conv_1x1.", ".conv_1x1.")
    if ".norm." in name:
        name = name.replace(".norm.", ".normalization.")
    if ".conv." in name:
        name = name.replace(".conv.", ".convolution.")
    if ".conv_proj." in name:
        name = name.replace(".conv_proj.", ".conv_projection.")

    for i in range(0, 2):
        for j in range(0, 4):
            if f".{i}.{j}." in name:
                name = name.replace(f".{i}.{j}.", f".{i}.layer.{j}.")

    for i in range(2, 6):
        for j in range(0, 4):
            if f".{i}.{j}." in name:
                name = name.replace(f".{i}.{j}.", f".{i}.")
                if "expand_1x1" in name:
                    name = name.replace("expand_1x1", "downsampling_layer.expand_1x1")
                if "conv_3x3" in name:
                    name = name.replace("conv_3x3", "downsampling_layer.conv_3x3")
                if "reduce_1x1" in name:
                    name = name.replace("reduce_1x1", "downsampling_layer.reduce_1x1")

    for i in range(2, 5):
        if f".global_rep.{i}.weight" in name:
            name = name.replace(f".global_rep.{i}.weight", ".layernorm.weight")
        if f".global_rep.{i}.bias" in name:
            name = name.replace(f".global_rep.{i}.bias", ".layernorm.bias")

    if ".global_rep." in name:
        name = name.replace(".global_rep.", ".transformer.")
    if ".pre_norm_mha.0." in name:
        name = name.replace(".pre_norm_mha.0.", ".layernorm_before.")
    if ".pre_norm_mha.1.out_proj." in name:
        name = name.replace(".pre_norm_mha.1.out_proj.", ".attention.output.dense.")
    if ".pre_norm_ffn.0." in name:
        name = name.replace(".pre_norm_ffn.0.", ".layernorm_after.")
    if ".pre_norm_ffn.1." in name:
        name = name.replace(".pre_norm_ffn.1.", ".intermediate.dense.")
    if ".pre_norm_ffn.4." in name:
        name = name.replace(".pre_norm_ffn.4.", ".output.dense.")
    if ".transformer." in name:
        name = name.replace(".transformer.", ".transformer.layer.")

    if ".aspp_layer." in name:
        name = name.replace(".aspp_layer.", ".")
    if ".aspp_pool." in name:
        name = name.replace(".aspp_pool.", ".")
    if "seg_head." in name:
        name = name.replace("seg_head.", "segmentation_head.")
    if "segmentation_head.classifier.classifier." in name:
        name = name.replace("segmentation_head.classifier.classifier.", "segmentation_head.classifier.")

    if "classifier.fc." in name:
        name = name.replace("classifier.fc.", "classifier.")
    elif (not base_model) and ("segmentation_head." not in name):
        name = "mobilevit." + name

    return name
def convert_state_dict(orig_state_dict, model, base_model=False):
    if base_model:
        model_prefix = ""
    else:
        model_prefix = "mobilevit."

    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if key[:8] == "encoder.":
            key = key[8:]

        if "qkv" in key:
            # split the fused qkv projection into separate query/key/value weights
            key_split = key.split(".")
            layer_num = int(key_split[0][6:]) - 1
            transformer_num = int(key_split[3])
            layer = model.get_submodule(f"{model_prefix}encoder.layer.{layer_num}")
            dim = layer.transformer.layer[transformer_num].attention.attention.all_head_size
            prefix = (
                f"{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention."
            )
            if "weight" in key:
                orig_state_dict[prefix + "query.weight"] = val[:dim, :]
                orig_state_dict[prefix + "key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[prefix + "value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[prefix + "query.bias"] = val[:dim]
                orig_state_dict[prefix + "key.bias"] = val[dim : dim * 2]
                orig_state_dict[prefix + "value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key, base_model)] = val

    return orig_state_dict


# We will verify the conversion on an image of cute cats
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_movilevit_checkpoint(mobilevit_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    """
    Copy/paste/tweak the original checkpoint's weights into our MobileViT structure.
    """
    config = get_mobilevit_config(mobilevit_name)

    # load original state_dict
    state_dict = torch.load(checkpoint_path, map_location="cpu")

    # load 🤗 model
    if mobilevit_name.startswith("deeplabv3_"):
        model = MobileViTForSemanticSegmentation(config).eval()
    else:
        model = MobileViTForImageClassification(config).eval()

    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    # Check outputs on an image, prepared by MobileViTImageProcessor
    image_processor = MobileViTImageProcessor(crop_size=config.image_size, size=config.image_size + 32)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits = outputs.logits

    if mobilevit_name.startswith("deeplabv3_"):
        assert logits.shape == (1, 21, 32, 32)

        if mobilevit_name == "deeplabv3_mobilevit_s":
            expected_logits = torch.tensor(
                [
                    [[6.2065, 6.1292, 6.2070], [6.1079, 6.1254, 6.1747], [6.0042, 6.1071, 6.1034]],
                    [[-6.9253, -6.8653, -7.0398], [-7.3218, -7.3983, -7.3670], [-7.1961, -7.2482, -7.1569]],
                    [[-4.4723, -4.4348, -4.3769], [-5.3629, -5.4632, -5.4598], [-5.1587, -5.3402, -5.5059]],
                ]
            )
        elif mobilevit_name == "deeplabv3_mobilevit_xs":
            expected_logits = torch.tensor(
                [
                    [[5.4449, 5.5733, 5.6314], [5.1815, 5.3930, 5.5963], [5.1656, 5.4333, 5.4853]],
                    [[-9.4423, -9.7766, -9.6714], [-9.1581, -9.5720, -9.5519], [-9.1006, -9.6458, -9.5703]],
                    [[-7.7721, -7.3716, -7.1583], [-8.4599, -8.0624, -7.7944], [-8.4172, -7.8366, -7.5025]],
                ]
            )
        elif mobilevit_name == "deeplabv3_mobilevit_xxs":
            expected_logits = torch.tensor(
                [
                    [[6.9811, 6.9743, 7.3123], [7.1777, 7.1931, 7.3938], [7.5633, 7.8050, 7.8901]],
                    [[-10.5536, -10.2332, -10.2924], [-10.2336, -9.8624, -9.5964], [-10.8840, -10.8158, -10.6659]],
                    [[-3.4938, -3.0631, -2.8620], [-3.4205, -2.8135, -2.6875], [-3.4179, -2.7945, -2.8750]],
                ]
            )
        else:
            raise ValueError(f"Unknown mobilevit_name: {mobilevit_name}")

        assert torch.allclose(logits[0, :3, :3, :3], expected_logits, atol=1e-4)
    else:
        assert logits.shape == (1, 1000)

        if mobilevit_name == "mobilevit_s":
            expected_logits = torch.tensor([-0.9866, 0.2392, -1.1241])
        elif mobilevit_name == "mobilevit_xs":
            expected_logits = torch.tensor([-2.4761, -0.9399, -1.9587])
        elif mobilevit_name == "mobilevit_xxs":
            expected_logits = torch.tensor([-1.9364, -1.2327, -0.4653])
        else:
            raise ValueError(f"Unknown mobilevit_name: {mobilevit_name}")

        assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {mobilevit_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model_mapping = {
            "mobilevit_s": "mobilevit-small",
            "mobilevit_xs": "mobilevit-x-small",
            "mobilevit_xxs": "mobilevit-xx-small",
            "deeplabv3_mobilevit_s": "deeplabv3-mobilevit-small",
            "deeplabv3_mobilevit_xs": "deeplabv3-mobilevit-x-small",
            "deeplabv3_mobilevit_xxs": "deeplabv3-mobilevit-xx-small",
        }

        print("Pushing to the hub...")
        model_name = model_mapping[mobilevit_name]
        image_processor.push_to_hub(model_name, organization="apple")
        model.push_to_hub(model_name, organization="apple")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--mobilevit_name",
default="mobilevit_s",
type=str,
help=(
"Name of the MobileViT model you'd like to convert. Should be one of 'mobilevit_s', 'mobilevit_xs',"
" 'mobilevit_xxs', 'deeplabv3_mobilevit_s', 'deeplabv3_mobilevit_xs', 'deeplabv3_mobilevit_xxs'."
),
)
parser.add_argument(
"--checkpoint_path", required=True, type=str, help="Path to the original state dict (.pt file)."
)
parser.add_argument(
"--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
    args = parser.parse_args()
convert_movilevit_checkpoint(
args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
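# Illustrative invocation (editor's addition); the script filename and checkpoint
# path below are hypothetical placeholders:
#
#   python convert_mobilevit_original_to_pytorch.py \
#       --mobilevit_name mobilevit_s \
#       --checkpoint_path ./mobilevit_s.pt \
#       --pytorch_dump_folder_path ./mobilevit-small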
| 583
| 0
|
"""simple docstring"""
import pytest
import requests
from datasets.utils.file_utils import http_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline
@pytest.mark.integration
def test_offline_with_timeout():
    with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT):
        with pytest.raises(RequestWouldHangIndefinitelyError):
            requests.request("GET", "https://huggingface.co")
        with pytest.raises(requests.exceptions.ConnectTimeout):
            requests.request("GET", "https://huggingface.co", timeout=1.0)


@pytest.mark.integration
def test_offline_with_connection_error():
    with offline(OfflineSimulationMode.CONNECTION_FAILS):
        with pytest.raises(requests.exceptions.ConnectionError):
            requests.request("GET", "https://huggingface.co")


def test_offline_with_datasets_offline_mode_enabled():
    with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1):
        with pytest.raises(ConnectionError):
            http_head("https://huggingface.co")
| 361
|
"""simple docstring"""
import logging
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import librosa
import torch
from datasets import DatasetDict, load_dataset
from packaging import version
from torch import nn
from transformers import (
    HfArgumentParser,
    Trainer,
    TrainingArguments,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForPreTraining,
    is_apex_available,
    trainer_utils,
)
from transformers.models.wav2vec2.modeling_wav2vec2 import _compute_mask_indices
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse("1.6"):
    _is_native_amp_available = True
    from torch.cuda.amp import autocast

logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config we are going to pretrain.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    freeze_feature_extractor: Optional[bool] = field(
        default=True, metadata={"help": "Whether to freeze the feature extractor layers of the model."}
    )
    verbose_logging: Optional[bool] = field(
        default=False,
        metadata={"help": "Whether to log verbose messages or not."},
    )
    max_gumbel_temperature: Optional[float] = field(
        default=2.0, metadata={"help": "Maximum temperature for gumbel softmax."}
    )
    min_gumbel_temperature: Optional[float] = field(
        default=0.5, metadata={"help": "Minimum temperature for gumbel softmax."}
    )
    gumbel_temperature_decay: Optional[float] = field(
        default=0.999995, metadata={"help": "Decay of gumbel temperature during training."}
    )
def configure_logger(model_args: ModelArguments, training_args: TrainingArguments):
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    logging_level = logging.WARNING
    if model_args.verbose_logging:
        logging_level = logging.DEBUG
    elif trainer_utils.is_main_process(training_args.local_rank):
        logging_level = logging.INFO
    logger.setLevel(logging_level)
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    dataset_name: str = field(
        default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_split_name: Optional[str] = field(
        default="train",
        metadata={
            "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
        },
    )
    validation_split_name: Optional[str] = field(
        default="validation",
        metadata={
            "help": (
                "The name of the validation data set split to use (via the datasets library). Defaults to 'validation'"
            )
        },
    )
    speech_file_column: Optional[str] = field(
        default="file",
        metadata={"help": "Column in the dataset that contains speech file path. Defaults to 'file'"},
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    validation_split_percentage: Optional[int] = field(
        default=1,
        metadata={
            "help": "The percentage of the train set used as validation set in case there's no validation split"
        },
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    max_duration_in_seconds: Optional[float] = field(
        default=20.0, metadata={"help": "Filter audio files that are longer than `max_duration_in_seconds` seconds"}
    )
@dataclass
class DataCollatorForWav2Vec2Pretraining:
    """
    Dynamically pad the received inputs and sample the masked time indices needed
    for self-supervised pretraining.
    """

    model: Wav2Vec2ForPreTraining
    feature_extractor: Wav2Vec2FeatureExtractor
    padding: Union[bool, str] = "longest"
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None

    def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]:
        # reformat list to dict and set to pytorch format
        batch = self.feature_extractor.pad(
            features,
            max_length=self.max_length,
            padding=self.padding,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors="pt",
        )
        mask_indices_seq_length = self.model._get_feat_extract_output_lengths(batch["input_values"].shape[-1])

        batch_size = batch["input_values"].shape[0]

        # make sure that no loss is computed on padded inputs
        if batch["attention_mask"] is not None:
            # compute real output lengths according to convolution formula
            output_lengths = self.model._get_feat_extract_output_lengths(batch["attention_mask"].sum(-1)).to(
                torch.long
            )

            attention_mask = torch.zeros(
                (batch_size, mask_indices_seq_length), dtype=torch.long, device=batch["input_values"].device
            )

            # these two operations makes sure that all values
            # before the output lengths indices are attended to
            attention_mask[
                (torch.arange(attention_mask.shape[0], device=batch["input_values"].device), output_lengths - 1)
            ] = 1
            attention_mask = attention_mask.flip([-1]).cumsum(-1).flip([-1]).bool()

        # sample randomly masked indices
        batch["mask_time_indices"] = _compute_mask_indices(
            (batch_size, mask_indices_seq_length),
            self.model.config.mask_time_prob,
            self.model.config.mask_time_length,
            attention_mask=attention_mask,
            min_masks=2,
        )

        return batch
class Wav2Vec2PreTrainer(Trainer):
    """
    Subclassed Trainer that decays the gumbel softmax temperature during training.
    """

    def __init__(self, *args, max_gumbel_temp=1, min_gumbel_temp=0, gumbel_temp_decay=1.0, **kwargs):
        super().__init__(*args, **kwargs)
        self.num_update_step = 0
        self.max_gumbel_temp = max_gumbel_temp
        self.min_gumbel_temp = min_gumbel_temp
        self.gumbel_temp_decay = gumbel_temp_decay

    def training_step(self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]]) -> torch.Tensor:
        """
        Perform a training step on a batch of inputs.
        """
        model.train()
        inputs = self._prepare_inputs(inputs)

        if self.use_amp:
            with autocast():
                loss = self.compute_loss(model, inputs)
        else:
            loss = self.compute_loss(model, inputs)

        if self.args.n_gpu > 1 or self.deepspeed:
            if model.module.config.ctc_loss_reduction == "mean":
                loss = loss.mean()
            elif model.module.config.ctc_loss_reduction == "sum":
                loss = loss.sum() / (inputs["mask_time_indices"]).sum()
            else:
                raise ValueError(f"{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']")

        if self.args.gradient_accumulation_steps > 1:
            loss = loss / self.args.gradient_accumulation_steps

        if self.use_amp:
            self.scaler.scale(loss).backward()
        elif self.use_apex:
            with amp.scale_loss(loss, self.optimizer) as scaled_loss:
                scaled_loss.backward()
        elif self.deepspeed:
            self.deepspeed.backward(loss)
        else:
            loss.backward()

        self.num_update_step += 1
        # make sure gumbel softmax temperature is decayed
        if self.args.n_gpu > 1 or self.deepspeed:
            model.module.set_gumbel_temperature(
                max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step, self.min_gumbel_temp)
            )
        else:
            model.set_gumbel_temperature(
                max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step, self.min_gumbel_temp)
            )

        return loss.detach()
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    configure_logger(model_args, training_args)

    # Downloading and loading a dataset from the hub.
    datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir)

    if "validation" not in datasets.keys():
        # make sure only "validation" and "train" keys remain
        datasets = DatasetDict()
        datasets["validation"] = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split=f"{data_args.train_split_name}[:{data_args.validation_split_percentage}%]",
            cache_dir=model_args.cache_dir,
        )
        datasets["train"] = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split=f"{data_args.train_split_name}[{data_args.validation_split_percentage}%:]",
            cache_dir=model_args.cache_dir,
        )
    else:
        # make sure only "validation" and "train" keys remain
        datasets = DatasetDict()
        datasets["validation"] = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split="validation",
            cache_dir=model_args.cache_dir,
        )
        datasets["train"] = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split=f"{data_args.train_split_name}",
            cache_dir=model_args.cache_dir,
        )

    # only normalized-inputs-training is supported
    feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
        model_args.model_name_or_path, cache_dir=model_args.cache_dir, do_normalize=True
    )

    def prepare_dataset(batch):
        # check that all files have the correct sampling rate
        batch["speech"], _ = librosa.load(batch[data_args.speech_file_column], sr=feature_extractor.sampling_rate)
        return batch

    # load audio files into numpy arrays
    vectorized_datasets = datasets.map(
        prepare_dataset, num_proc=data_args.preprocessing_num_workers, remove_columns=datasets["train"].column_names
    )

    # filter audio files that are too long
    vectorized_datasets = vectorized_datasets.filter(
        lambda data: len(data["speech"]) < int(data_args.max_duration_in_seconds * feature_extractor.sampling_rate)
    )

    def normalize(batch):
        return feature_extractor(batch["speech"], sampling_rate=feature_extractor.sampling_rate)

    # normalize and transform to `BatchFeatures`
    vectorized_datasets = vectorized_datasets.map(
        normalize,
        batched=True,
        num_proc=data_args.preprocessing_num_workers,
        load_from_cache_file=not data_args.overwrite_cache,
        remove_columns=vectorized_datasets["train"].column_names,
    )

    # pretraining is only supported for "newer" stable layer norm architecture
    # apply_spec_augment has to be True, mask_feature_prob has to be 0.0
    config = Wav2Vec2Config.from_pretrained(
        model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        gradient_checkpointing=training_args.gradient_checkpointing,
    )

    if not config.do_stable_layer_norm or config.feat_extract_norm != "layer":
        raise ValueError(
            "PreTraining is only supported for ``config.do_stable_layer_norm=True`` and"
            " ``config.feat_extract_norm='layer'"
        )

    model = Wav2Vec2ForPreTraining(config)

    data_collator = DataCollatorForWav2Vec2Pretraining(model=model, feature_extractor=feature_extractor)

    trainer = Wav2Vec2PreTrainer(
        model=model,
        data_collator=data_collator,
        args=training_args,
        train_dataset=vectorized_datasets["train"],
        eval_dataset=vectorized_datasets["validation"],
        tokenizer=feature_extractor,
        max_gumbel_temp=model_args.max_gumbel_temperature,
        min_gumbel_temp=model_args.min_gumbel_temperature,
        gumbel_temp_decay=model_args.gumbel_temperature_decay,
    )
    trainer.train()
if __name__ == "__main__":
main()
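# Illustrative invocation (editor's addition); the model and dataset identifiers are
# placeholders, not verified checkpoints:
#
#   python run_pretrain.py \
#       --model_name_or_path <wav2vec2-model-with-stable-layer-norm> \
#       --dataset_name <hub-dataset-with-a-file-column> \
#       --output_dir ./wav2vec2-pretrained \
#       --do_train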
| 361
| 1
|
'''simple docstring'''
def snake_case_ ( a__ : str ,a__ : str ):
"""simple docstring"""
def get_matched_characters(a__ : str ,a__ : str ) -> str:
__lowercase = []
__lowercase = min(len(_stra ) ,len(_stra ) ) // 2
for i, l in enumerate(_stra ):
__lowercase = int(max(0 ,i - limit ) )
__lowercase = int(min(i + limit + 1 ,len(_stra ) ) )
if l in _stra[left:right]:
matched.append(a__ )
__lowercase = f'{_stra[0:_stra.index(a__ )]} {_stra[_stra.index(a__ ) + 1:]}'
return "".join(a__ )
# matching characters
__lowercase = get_matched_characters(a__ ,a__ )
__lowercase = get_matched_characters(a__ ,a__ )
__lowercase = len(a__ )
# transposition
__lowercase = (
len([(ca, ca) for ca, ca in zip(a__ ,a__ ) if ca != ca] ) // 2
)
if not match_count:
__lowercase = 0.0
else:
__lowercase = (
1
/ 3
* (
match_count / len(a__ )
+ match_count / len(a__ )
+ (match_count - transpositions) / match_count
)
)
# common prefix up to 4 characters
__lowercase = 0
for ca, ca in zip(stra[:4] ,stra[:4] ):
if ca == ca:
prefix_len += 1
else:
break
return jaro + 0.1 * prefix_len * (1 - jaro)
if __name__ == "__main__":
import doctest
doctest.testmod()
print(jaro_winkler("""hello""", """world"""))
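# Worked example (editor's addition): for "martha" vs "marhta" all six characters
# match, the t/h pair counts as one transposition, so jaro = (1 + 1 + 5/6) / 3
# ~= 0.944; the shared 3-character prefix lifts the score to ~= 0.961.
assert abs(jaro_winkler("martha", "marhta") - 0.9611111111111111) < 1e-9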
| 163
|
'''simple docstring'''
from __future__ import annotations
import unittest
import numpy as np
from transformers import LayoutLMConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.layoutlm.modeling_tf_layoutlm import (
TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLayoutLMForMaskedLM,
TFLayoutLMForQuestionAnswering,
TFLayoutLMForSequenceClassification,
TFLayoutLMForTokenClassification,
TFLayoutLMModel,
)
class TFLayoutLMModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
        range_bbox=1000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        # convert bbox to numpy since TF does not support item assignment
        bbox = ids_tensor([self.batch_size, self.seq_length, 4], self.range_bbox).numpy()
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t
        bbox = tf.convert_to_tensor(bbox)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = LayoutLMConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFLayoutLMModel(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, bbox, token_type_ids=token_type_ids)
        result = model(input_ids, bbox)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFLayoutLMForMaskedLM(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFLayoutLMForSequenceClassification(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFLayoutLMForTokenClassification(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFLayoutLMForQuestionAnswering(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_tf
class TFLayoutLMModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFLayoutLMModel,
            TFLayoutLMForMaskedLM,
            TFLayoutLMForTokenClassification,
            TFLayoutLMForSequenceClassification,
            TFLayoutLMForQuestionAnswering,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFLayoutLMModel,
            "fill-mask": TFLayoutLMForMaskedLM,
            "text-classification": TFLayoutLMForSequenceClassification,
            "token-classification": TFLayoutLMForTokenClassification,
            "zero-shot": TFLayoutLMForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = True
    onnx_min_opset = 10

    def setUp(self):
        self.model_tester = TFLayoutLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LayoutLMConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFLayoutLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip("Onnx compliancy broke with TF 2.10")
    def test_onnx_compliancy(self):
        pass
def prepare_layoutlm_batch_inputs():
    # Here we prepare a batch of 2 sequences to test a LayoutLM forward pass on.
    # fmt: off
    input_ids = tf.convert_to_tensor([[101,1019,1014,1016,1037,12849,4747,1004,14246,2278,5439,4524,5002,2930,2193,2930,4341,3208,1005,1055,2171,2848,11300,3531,102],[101,4070,4034,7020,1024,3058,1015,1013,2861,1013,6070,19274,2772,6205,27814,16147,16147,4343,2047,10283,10969,14389,1012,2338,102]])  # noqa: E231
    attention_mask = tf.convert_to_tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],])  # noqa: E231
    bbox = tf.convert_to_tensor([[[0,0,0,0],[423,237,440,251],[427,272,441,287],[419,115,437,129],[961,885,992,912],[256,38,330,58],[256,38,330,58],[336,42,353,57],[360,39,401,56],[360,39,401,56],[411,39,471,59],[479,41,528,59],[533,39,630,60],[67,113,134,131],[141,115,209,132],[68,149,133,166],[141,149,187,164],[195,148,287,165],[195,148,287,165],[195,148,287,165],[295,148,349,165],[441,149,492,166],[497,149,546,164],[64,201,125,218],[1000,1000,1000,1000]],[[0,0,0,0],[662,150,754,166],[665,199,742,211],[519,213,554,228],[519,213,554,228],[134,433,187,454],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[314,469,376,482],[504,684,582,706],[941,825,973,900],[941,825,973,900],[941,825,973,900],[941,825,973,900],[610,749,652,765],[130,659,168,672],[176,657,237,672],[238,657,312,672],[443,653,628,672],[443,653,628,672],[716,301,825,317],[1000,1000,1000,1000]]])  # noqa: E231
    token_type_ids = tf.convert_to_tensor([[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]])  # noqa: E231
    # these are sequence labels (i.e. at the token level)
    labels = tf.convert_to_tensor([[-100,10,10,10,9,1,-100,7,7,-100,7,7,4,2,5,2,8,8,-100,-100,5,0,3,2,-100],[-100,12,12,12,-100,12,10,-100,-100,-100,-100,10,12,9,-100,-100,-100,10,10,10,9,12,-100,10,-100]])  # noqa: E231
    # fmt: on
    return input_ids, attention_mask, bbox, token_type_ids, labels
@require_tf
class TFLayoutLMModelIntegrationTest(unittest.TestCase):
    @slow
    def test_forward_pass_no_head(self):
        model = TFLayoutLMModel.from_pretrained("microsoft/layoutlm-base-uncased")
        input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs()

        # forward pass
        outputs = model(input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids)

        # test the sequence output on [0, :3, :3]
        expected_slice = tf.convert_to_tensor(
            [[0.1785, -0.1947, -0.0425], [-0.3254, -0.2807, 0.2553], [-0.5391, -0.3322, 0.3364]],
        )
        self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-3))

        # test the pooled output on [1, :3]
        expected_slice = tf.convert_to_tensor([-0.6580, -0.0214, 0.8552])
        self.assertTrue(np.allclose(outputs.pooler_output[1, :3], expected_slice, atol=1e-3))

    @slow
    def test_forward_pass_sequence_classification(self):
        model = TFLayoutLMForSequenceClassification.from_pretrained("microsoft/layoutlm-base-uncased", num_labels=2)
        input_ids, attention_mask, bbox, token_type_ids, _ = prepare_layoutlm_batch_inputs()

        # forward pass
        outputs = model(
            input_ids=input_ids,
            bbox=bbox,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            labels=tf.convert_to_tensor([1, 1]),
        )

        # test whether we get a loss as a scalar
        loss = outputs.loss
        expected_shape = (2,)
        self.assertEqual(loss.shape, expected_shape)

        # test the shape of the logits
        logits = outputs.logits
        expected_shape = (2, 2)
        self.assertEqual(logits.shape, expected_shape)

    @slow
    def test_forward_pass_token_classification(self):
        model = TFLayoutLMForTokenClassification.from_pretrained("microsoft/layoutlm-base-uncased", num_labels=13)
        input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs()

        # forward pass
        outputs = model(
            input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids, labels=labels
        )

        # test the shape of the logits
        logits = outputs.logits
        expected_shape = tf.convert_to_tensor((2, 25, 13))
        self.assertEqual(logits.shape, expected_shape)

    @slow
    def test_forward_pass_question_answering(self):
        model = TFLayoutLMForQuestionAnswering.from_pretrained("microsoft/layoutlm-base-uncased")
        input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs()

        # forward pass
        outputs = model(input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids)

        # test the shape of the logits
        expected_shape = tf.convert_to_tensor((2, 25))
        self.assertEqual(outputs.start_logits.shape, expected_shape)
        self.assertEqual(outputs.end_logits.shape, expected_shape)
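# To run just this file, something like the following should work; the path is
# an assumption about the usual transformers test layout:
#
#   pytest tests/models/layoutlm/test_modeling_tf_layoutlm.py -k "forward_pass"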
"""Nearest-neighbour image resizing."""

import numpy as np
from cv2 import destroyAllWindows, imread, imshow, waitKey


class NearestNeighbour:
    """Simplest and fastest version of image resizing: every destination pixel
    copies the nearest source pixel."""

    def __init__(self, img, dst_width: int, dst_height: int):
        if dst_width < 0 or dst_height < 0:
            raise ValueError("Destination width/height should be > 0")

        self.img = img
        self.src_w = img.shape[1]
        self.src_h = img.shape[0]
        self.dst_w = dst_width
        self.dst_h = dst_height

        self.ratio_x = self.src_w / self.dst_w
        self.ratio_y = self.src_h / self.dst_h

        self.output = self.output_img = (
            np.ones((self.dst_h, self.dst_w, 3), np.uint8) * 255
        )

    def process(self):
        for i in range(self.dst_h):
            for j in range(self.dst_w):
                self.output[i][j] = self.img[self.get_y(i)][self.get_x(j)]

    def get_x(self, x: int) -> int:
        """Map a destination column to the nearest source column."""
        return int(self.ratio_x * x)

    def get_y(self, y: int) -> int:
        """Map a destination row to the nearest source row."""
        return int(self.ratio_y * y)


if __name__ == "__main__":
    dst_w, dst_h = 800, 600
    im = imread("image_data/lena.jpg", 1)
    n = NearestNeighbour(im, dst_w, dst_h)
    n.process()

    imshow(
        f"Image resized from: {im.shape[1]}x{im.shape[0]} to {dst_w}x{dst_h}", n.output
    )
    waitKey(0)
    destroyAllWindows()
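# Worked example of the mapping implemented by get_x/get_y above: resizing an
# 800x600 source down to 400x300 gives ratio_x == ratio_y == 2.0, so the
# destination pixel at (i=10, j=25) copies the source pixel at
# (get_y(10), get_x(25)) == (20, 50).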
"""Count the ways a number can be written as a sum of n-th powers of distinct
natural numbers, using backtracking."""

from math import pow


def backtrack(
    needed_sum: int,
    power: int,
    current_number: int,
    current_sum: int,
    solutions_count: int,
) -> tuple[int, int]:
    if current_sum == needed_sum:
        # If the sum of the powers is equal to needed_sum, then we have a solution.
        solutions_count += 1
        return current_sum, solutions_count

    i_to_n = int(pow(current_number, power))
    if current_sum + i_to_n <= needed_sum:
        # If the sum of the powers is less than needed_sum, then continue adding powers.
        current_sum += i_to_n
        current_sum, solutions_count = backtrack(
            needed_sum, power, current_number + 1, current_sum, solutions_count
        )
        current_sum -= i_to_n
    if i_to_n < needed_sum:
        # If the power of i is less than needed_sum, then try with the next power.
        current_sum, solutions_count = backtrack(
            needed_sum, power, current_number + 1, current_sum, solutions_count
        )
    return current_sum, solutions_count


def solve(needed_sum: int, power: int) -> int:
    if not (1 <= needed_sum <= 1000 and 2 <= power <= 10):
        raise ValueError(
            "Invalid input\n"
            "needed_sum must be between 1 and 1000, power between 2 and 10."
        )

    return backtrack(needed_sum, power, 1, 0, 0)[1]  # Return the solutions_count
if __name__ == "__main__":
import doctest
doctest.testmod()
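    # Hand-checkable example: with power=2 the usable squares up to 13 are
    # 1, 4 and 9, and the only subset summing to 13 is {4, 9}.
    print(solve(13, 2))  # expected output: 1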
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json",
    # See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}


class GPTNeoXConfig(PretrainedConfig):
    """Configuration for GPT-NeoX models; the defaults describe EleutherAI/gpt-neox-20b."""

    model_type = "gpt_neox"

    def __init__(
        self,
        vocab_size=50432,
        hidden_size=6144,
        num_hidden_layers=44,
        num_attention_heads=64,
        intermediate_size=24576,
        hidden_act="gelu",
        rotary_pct=0.25,
        rotary_emb_base=10000,
        attention_dropout=0.0,
        hidden_dropout=0.0,
        classifier_dropout=0.1,
        max_position_embeddings=2048,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        use_cache=True,
        bos_token_id=0,
        eos_token_id=2,
        tie_word_embeddings=False,
        use_parallel_residual=True,
        rope_scaling=None,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
        self.classifier_dropout = classifier_dropout
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.use_parallel_residual = use_parallel_residual
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        if self.hidden_size % self.num_attention_heads != 0:
            raise ValueError(
                "The hidden size is not divisible by the number of attention heads! Make sure to update them!"
            )

    def _rope_scaling_validation(self):
        """Validate the `rope_scaling` configuration."""
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
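# Minimal usage sketch (not part of the original module): a small config with
# linear RoPE scaling; the dict is checked by `_rope_scaling_validation` during
# `__init__`.
#
#   config = GPTNeoXConfig(
#       hidden_size=512,
#       num_attention_heads=8,
#       num_hidden_layers=6,
#       rope_scaling={"type": "linear", "factor": 2.0},
#   )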
__author__ = "Tobias Carryer"

from time import time


class LinearCongruentialGenerator:
    """A pseudorandom number generator based on the linear congruential method."""

    def __init__(self, multiplier, increment, modulo, seed=int(time())):  # noqa: B008
        self.multiplier = multiplier
        self.increment = increment
        self.modulo = modulo
        self.seed = seed

    def next_number(self):
        """Advance the generator state and return the next pseudorandom number."""
        self.seed = (self.multiplier * self.seed + self.increment) % self.modulo
        return self.seed


if __name__ == "__main__":
    # Show the LCG in action.
    lcg = LinearCongruentialGenerator(1664525, 1013904223, 2 << 31)
    while True:
        print(lcg.next_number())
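# A seeded generator is fully deterministic, which makes LCGs convenient for
# reproducible tests. A sketch (not part of the original module) that follows
# directly from the update rule above:
#
#   lcg = LinearCongruentialGenerator(1664525, 1013904223, 2 << 31, seed=42)
#   assert lcg.next_number() == (1664525 * 42 + 1013904223) % (2 << 31)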
"""simple docstring"""
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import datasets
import datasets.config
from .utils import require_beam
class DummyBeamDataset(datasets.BeamBasedBuilder):
    """Dummy beam dataset."""

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({"content": datasets.Value("string")}),
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager, pipeline):
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"examples": get_test_dummy_examples()})]

    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)


class NestedBeamDataset(datasets.BeamBasedBuilder):
    """Dummy beam dataset with nested features."""

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string")})}),
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager, pipeline):
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"examples": get_test_nested_examples()})
        ]

    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)


def get_test_dummy_examples():
    return [(i, {"content": content}) for i, content in enumerate(["foo", "bar", "foobar"])]


def get_test_nested_examples():
    return [(i, {"a": {"b": [content]}}) for i, content in enumerate(["foo", "bar", "foobar"])]


class BeamBuilderTest(TestCase):
    @require_beam
    def test_download_and_prepare(self):
        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train.arrow")
                )
            )
            self.assertDictEqual(builder.info.features, datasets.Features({"content": datasets.Value("string")}))
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            self.assertDictEqual(dset["train"][0], get_test_dummy_examples()[0][1])
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1], get_test_dummy_examples()[expected_num_examples - 1][1]
            )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset

    @require_beam
    def test_download_and_prepare_sharded(self):
        import apache_beam as beam

        original_write_parquet = beam.io.parquetio.WriteToParquet

        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            with patch("apache_beam.io.parquetio.WriteToParquet") as write_parquet_mock:
                write_parquet_mock.side_effect = partial(original_write_parquet, num_shards=2)
                builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train-00000-of-00002.arrow"
                    )
                )
            )
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train-00001-of-00002.arrow"
                    )
                )
            )
            self.assertDictEqual(builder.info.features, datasets.Features({"content": datasets.Value("string")}))
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            # Order is not preserved when sharding, so we just check that all the elements are there
            self.assertListEqual(sorted(dset["train"]["content"]), sorted(["foo", "bar", "foobar"]))
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset

    @require_beam
    def test_no_beam_options(self):
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir)
            self.assertRaises(datasets.builder.MissingBeamOptions, builder.download_and_prepare)

    @require_beam
    def test_nested_features(self):
        expected_num_examples = len(get_test_nested_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = NestedBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train.arrow")
                )
            )
            self.assertDictEqual(
                builder.info.features, datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string")})})
            )
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            self.assertDictEqual(dset["train"][0], get_test_nested_examples()[0][1])
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1], get_test_nested_examples()[expected_num_examples - 1][1]
            )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset
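# Note: these tests require `apache_beam` to be installed; "DirectRunner"
# executes the whole pipeline locally and in-process, which is what keeps the
# tests runnable on a plain CI worker without a Beam cluster.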
"""simple docstring"""
import shutil
import tempfile
import unittest
from unittest.mock import patch
from transformers import (
DefaultFlowCallback,
IntervalStrategy,
PrinterCallback,
ProgressCallback,
Trainer,
TrainerCallback,
TrainingArguments,
is_torch_available,
)
from transformers.testing_utils import require_torch
if is_torch_available():
from transformers.trainer import DEFAULT_CALLBACKS
from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel
class MyTestTrainerCallback(TrainerCallback):
    """A callback that registers the events that go through."""

    def __init__(self):
        self.events = []

    def on_init_end(self, args, state, control, **kwargs):
        self.events.append("on_init_end")

    def on_train_begin(self, args, state, control, **kwargs):
        self.events.append("on_train_begin")

    def on_train_end(self, args, state, control, **kwargs):
        self.events.append("on_train_end")

    def on_epoch_begin(self, args, state, control, **kwargs):
        self.events.append("on_epoch_begin")

    def on_epoch_end(self, args, state, control, **kwargs):
        self.events.append("on_epoch_end")

    def on_step_begin(self, args, state, control, **kwargs):
        self.events.append("on_step_begin")

    def on_step_end(self, args, state, control, **kwargs):
        self.events.append("on_step_end")

    def on_evaluate(self, args, state, control, **kwargs):
        self.events.append("on_evaluate")

    def on_predict(self, args, state, control, **kwargs):
        self.events.append("on_predict")

    def on_save(self, args, state, control, **kwargs):
        self.events.append("on_save")

    def on_log(self, args, state, control, **kwargs):
        self.events.append("on_log")

    def on_prediction_step(self, args, state, control, **kwargs):
        self.events.append("on_prediction_step")


@require_torch
class TrainerCallbackTest(unittest.TestCase):
    def setUp(self):
        self.output_dir = tempfile.mkdtemp()

    def tearDown(self):
        shutil.rmtree(self.output_dir)

    def get_trainer(self, a=0, b=0, train_len=64, eval_len=64, callbacks=None, disable_tqdm=False, **kwargs):
        train_dataset = RegressionDataset(length=train_len)
        eval_dataset = RegressionDataset(length=eval_len)
        config = RegressionModelConfig(a=a, b=b)
        model = RegressionPreTrainedModel(config)

        args = TrainingArguments(self.output_dir, disable_tqdm=disable_tqdm, report_to=[], **kwargs)
        return Trainer(
            model,
            args,
            train_dataset=train_dataset,
            eval_dataset=eval_dataset,
            callbacks=callbacks,
        )

    def check_callbacks_equality(self, cbs1, cbs2):
        self.assertEqual(len(cbs1), len(cbs2))

        # Order doesn't matter
        cbs1 = sorted(cbs1, key=lambda cb: cb.__name__ if isinstance(cb, type) else cb.__class__.__name__)
        cbs2 = sorted(cbs2, key=lambda cb: cb.__name__ if isinstance(cb, type) else cb.__class__.__name__)

        for cb1, cb2 in zip(cbs1, cbs2):
            if isinstance(cb1, type) and isinstance(cb2, type):
                self.assertEqual(cb1, cb2)
            elif isinstance(cb1, type) and not isinstance(cb2, type):
                self.assertEqual(cb1, cb2.__class__)
            elif not isinstance(cb1, type) and isinstance(cb2, type):
                self.assertEqual(cb1.__class__, cb2)
            else:
                self.assertEqual(cb1, cb2)

    def get_expected_events(self, trainer):
        expected_events = ["on_init_end", "on_train_begin"]
        step = 0
        train_dl_len = len(trainer.get_eval_dataloader())
        evaluation_events = ["on_prediction_step"] * len(trainer.get_eval_dataloader()) + ["on_log", "on_evaluate"]
        for _ in range(trainer.state.num_train_epochs):
            expected_events.append("on_epoch_begin")
            for _ in range(train_dl_len):
                step += 1
                expected_events += ["on_step_begin", "on_step_end"]
                if step % trainer.args.logging_steps == 0:
                    expected_events.append("on_log")
                if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0:
                    expected_events += evaluation_events.copy()
                if step % trainer.args.save_steps == 0:
                    expected_events.append("on_save")
            expected_events.append("on_epoch_end")
            if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH:
                expected_events += evaluation_events.copy()
        expected_events += ["on_log", "on_train_end"]
        return expected_events

    def test_init_callback(self):
        trainer = self.get_trainer()
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # Callbacks passed at init are added to the default callbacks
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback])
        expected_callbacks.append(MyTestTrainerCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # TrainingArguments.disable_tqdm controls if use ProgressCallback or PrinterCallback
        trainer = self.get_trainer(disable_tqdm=True)
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [PrinterCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

    def test_add_remove_callback(self):
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        trainer = self.get_trainer()

        # We can add, pop, or remove by class name
        trainer.remove_callback(DefaultFlowCallback)
        expected_callbacks.remove(DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer = self.get_trainer()
        cb = trainer.pop_callback(DefaultFlowCallback)
        self.assertEqual(cb.__class__, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer.add_callback(DefaultFlowCallback)
        expected_callbacks.insert(0, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # We can also add, pop, or remove by instance
        trainer = self.get_trainer()
        cb = trainer.callback_handler.callbacks[0]
        trainer.remove_callback(cb)
        expected_callbacks.remove(DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer = self.get_trainer()
        cb1 = trainer.callback_handler.callbacks[0]
        cb2 = trainer.pop_callback(cb1)
        self.assertEqual(cb1, cb2)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer.add_callback(cb1)
        expected_callbacks.insert(0, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

    def test_event_flow(self):
        import warnings

        # XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested
        warnings.simplefilter(action="ignore", category=UserWarning)

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback])
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # Independent log/save/eval
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], logging_steps=5)
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], save_steps=5)
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], eval_steps=5, evaluation_strategy="steps")
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], evaluation_strategy="epoch")
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # A bit of everything
        trainer = self.get_trainer(
            callbacks=[MyTestTrainerCallback],
            logging_steps=3,
            save_steps=10,
            eval_steps=5,
            evaluation_strategy="steps",
        )
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # warning should be emitted for duplicated callbacks
        with patch("transformers.trainer_callback.logger.warning") as warn_mock:
            trainer = self.get_trainer(
                callbacks=[MyTestTrainerCallback, MyTestTrainerCallback],
            )
            assert str(MyTestTrainerCallback) in warn_mock.call_args[0][0]
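# The same event-recording pattern works outside the test suite. A sketch using
# the helpers defined above (the custom callback sits at index -2 because the
# progress/printer callback is kept last by the callback handler):
#
#   trainer = self.get_trainer(callbacks=[MyTestTrainerCallback])
#   trainer.train()
#   events = trainer.callback_handler.callbacks[-2].events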
"""Lazy import structure for the Swin Transformer V2 (Swinv2) model."""

from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_swinv2": ["SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Swinv2Config"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_swinv2"] = [
        "SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Swinv2ForImageClassification",
        "Swinv2ForMaskedImageModeling",
        "Swinv2Model",
        "Swinv2PreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_swinv2 import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, Swinv2Config

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_swinv2 import (
            SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Swinv2ForImageClassification,
            Swinv2ForMaskedImageModeling,
            Swinv2Model,
            Swinv2PreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
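# Because `_LazyModule` is installed into `sys.modules`, importing this package
# stays cheap: the heavy `modeling_swinv2` module is only imported the first
# time an attribute such as `Swinv2Model` is actually accessed.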
"""Lazy import structure for the HerBERT tokenizers."""

from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available


_import_structure = {"tokenization_herbert": ["HerbertTokenizer"]}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_herbert_fast"] = ["HerbertTokenizerFast"]

if TYPE_CHECKING:
    from .tokenization_herbert import HerbertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_herbert_fast import HerbertTokenizerFast

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"


class PerceiverTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PerceiverTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        tokenizer = PerceiverTokenizer()
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def perceiver_tokenizer(self):
        return PerceiverTokenizer.from_pretrained("deepmind/language-perceiver")

    def get_tokenizer(self, **kwargs) -> PerceiverTokenizer:
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5) -> Tuple[str, list]:
        # Build a clean (text, ids) pair from the tokenizer's own vocabulary, keeping only
        # ids that decode to simple ASCII strings and round-trip through encode().
        toks = []
        for i in range(len(tokenizer)):
            try:
                tok = tokenizer.decode([i], clean_up_tokenization_spaces=False)
            except UnicodeDecodeError:
                pass
            toks.append((i, tok))

        toks = list(filter(lambda t: re.match(r"^[ a-zA-Z]+$", t[1]), toks))
        toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], add_special_tokens=False), toks))
        if max_length is not None and len(toks) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks) < min_length and len(toks) > 0:
            while len(toks) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]

        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
        if " " not in output_txt and len(toks_ids) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False)
                + " "
                + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False)
            )
        if with_prefix_space:
            output_txt = " " + output_txt
        output_ids = tokenizer.encode(output_txt, add_special_tokens=False)
        return output_txt, output_ids
    def test_multibytes_char(self):
        tokenizer = self.perceiver_tokenizer
        src_text = "Unicode €."
        encoded = tokenizer(src_text)
        encoded_ids = [4, 91, 116, 111, 105, 117, 106, 107, 38, 232, 136, 178, 52, 5]
        self.assertEqual(encoded["input_ids"], encoded_ids)

        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "[CLS]Unicode €.[SEP]")

        encoded = tokenizer("e è é ê ë")
        encoded_ids = [4, 107, 38, 201, 174, 38, 201, 175, 38, 201, 176, 38, 201, 177, 5]
        self.assertEqual(encoded["input_ids"], encoded_ids)
        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "[CLS]e è é ê ë[SEP]")

        # encode/decode, but with `encode` instead of `__call__`
        self.assertEqual(tokenizer.decode(tokenizer.encode("e è é ê ë")), "[CLS]e è é ê ë[SEP]")

    def test_prepare_batch_integration(self):
        tokenizer = self.perceiver_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        # fmt: off
        expected_src_tokens = [4, 71, 38, 114, 117, 116, 109, 38, 118, 103, 120, 103, 109, 120, 103, 118, 110, 38, 108, 117, 120, 38, 121, 123, 115, 115, 103, 120, 111, 128, 103, 122, 111, 117, 116, 52, 5, 0]
        # fmt: on
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch, BatchEncoding)

        if FRAMEWORK != "jax":
            result = list(batch.input_ids.numpy()[0])
        else:
            result = list(batch.input_ids.tolist()[0])

        self.assertListEqual(expected_src_tokens, result)

        self.assertEqual((2, 38), batch.input_ids.shape)
        self.assertEqual((2, 38), batch.attention_mask.shape)

    def test_empty_target_text(self):
        tokenizer = self.perceiver_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        # check if input_ids are returned and no decoder_input_ids
        self.assertIn("input_ids", batch)
        self.assertIn("attention_mask", batch)
        self.assertNotIn("decoder_input_ids", batch)
        self.assertNotIn("decoder_attention_mask", batch)

    def test_max_length_integration(self):
        tokenizer = self.perceiver_tokenizer
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        targets = tokenizer(
            text_target=tgt_text, max_length=32, padding="max_length", truncation=True, return_tensors=FRAMEWORK
        )
        self.assertEqual(32, targets["input_ids"].shape[1])
    def test_save_and_load_tokenizer(self):
        # safety check that the default model_max_length is not the value we test with below
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                self.assertNotEqual(tokenizer.model_max_length, 42)

        # Now let's start the test
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)

                shutil.rmtree(tmpdirname)

        tokenizers = self.get_tokenizers(model_max_length=42)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"
                tokenizer.add_tokens(["bim", "bambam"])
                additional_special_tokens = tokenizer.additional_special_tokens
                additional_special_tokens.append("new_additional_special_token")
                tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens})
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)
                self.assertIn("new_additional_special_token", after_tokenizer.additional_special_tokens)
                self.assertEqual(after_tokenizer.model_max_length, 42)

                tokenizer = tokenizer.__class__.from_pretrained(tmpdirname, model_max_length=43)
                self.assertEqual(tokenizer.model_max_length, 43)

                shutil.rmtree(tmpdirname)

    def test_special_tokens_initialization_with_non_empty_additional_special_tokens(self):
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))

        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))

        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), encoding="utf-8") as json_file:
                    special_tokens_map = json.load(json_file)

                with open(os.path.join(tmp_dir, "tokenizer_config.json"), encoding="utf-8") as json_file:
                    tokenizer_config = json.load(json_file)

                added_tokens_extra_ids = [f"<extra_id_{i}>" for i in range(125)]

                special_tokens_map["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]
                tokenizer_config["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), "w", encoding="utf-8") as outfile:
                    json.dump(special_tokens_map, outfile)
                with open(os.path.join(tmp_dir, "tokenizer_config.json"), "w", encoding="utf-8") as outfile:
                    json.dump(tokenizer_config, outfile)

                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                tokenizer_without_change_in_init = tokenizer_class.from_pretrained(
                    tmp_dir,
                )
                self.assertIn(
                    "an_additional_special_token", tokenizer_without_change_in_init.additional_special_tokens
                )
                self.assertEqual(
                    ["an_additional_special_token"],
                    tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids(["an_additional_special_token"])
                    ),
                )

                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                new_added_tokens = added_tokens_extra_ids + [AddedToken("a_new_additional_special_token", lstrip=True)]
                tokenizer = tokenizer_class.from_pretrained(
                    tmp_dir,
                    additional_special_tokens=new_added_tokens,
                )

                self.assertIn("a_new_additional_special_token", tokenizer.additional_special_tokens)
                self.assertEqual(
                    ["a_new_additional_special_token"],
                    tokenizer.convert_ids_to_tokens(
                        tokenizer.convert_tokens_to_ids(["a_new_additional_special_token"])
                    ),
                )
    def test_decode_invalid_byte_id(self):
        tokenizer = self.perceiver_tokenizer
        self.assertEqual(tokenizer.decode([178]), "�")

    # tokenizer can be instantiated without any pretrained files, so no need for a pretrained tokenizer list
    def test_pretrained_model_lists(self):
        pass

    # tokenizer does not have a vocabulary file
    def test_get_vocab(self):
        pass

    # inputs cannot be pretokenized since the ids depend on the whole input string
    def test_pretokenized_inputs(self):
        pass

    # tests all ids in vocab => vocab doesn't exist so unnecessary to test
    def test_conversion_reversible(self):
        pass

    def test_convert_tokens_to_string_format(self):
        # The default common tokenizer tests use invalid tokens for Perceiver, which can only accept
        # one-character strings and special added tokens as tokens
        tokenizers = self.get_tokenizers(fast=True, do_lower_case=True)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                tokens = ["[CLS]", "t", "h", "i", "s", " ", "i", "s", " ", "a", " ", "t", "e", "s", "t", "[SEP]"]
                string = tokenizer.convert_tokens_to_string(tokens)
                self.assertIsInstance(string, str)
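# Why the ids in the tests above look the way they do: the Perceiver tokenizer
# is a raw utf-8 byte tokenizer with 6 special tokens prepended to the
# vocabulary, so a single-byte character maps to ord(char) + 6 (" " -> 38,
# "e" -> 107) and a multibyte character like "€" (bytes 226, 130, 172) becomes
# the id sequence [232, 136, 178].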
import argparse
import os
import shutil
import torch
from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer
def main(args):
    pruning_method = args.pruning_method
    threshold = args.threshold

    model_name_or_path = args.model_name_or_path.rstrip("/")
    target_model_path = args.target_model_path

    print(f"Load fine-pruned model from {model_name_or_path}")
    model = torch.load(os.path.join(model_name_or_path, "pytorch_model.bin"))
    pruned_model = {}

    for name, tensor in model.items():
        if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        elif "classifier" in name or "qa_output" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        elif "bias" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        else:
            if pruning_method == "magnitude":
                mask = MagnitudeBinarizer.apply(inputs=tensor, threshold=threshold)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "topK":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                mask = TopKBinarizer.apply(scores, threshold)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "sigmoied_threshold":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                mask = ThresholdBinarizer.apply(scores, threshold, True)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "l0":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                l, r = -0.1, 1.1
                s = torch.sigmoid(scores)
                s_bar = s * (r - l) + l
                mask = s_bar.clamp(min=0.0, max=1.0)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            else:
                raise ValueError("Unknown pruning method")

    if target_model_path is None:
        target_model_path = os.path.join(
            os.path.dirname(model_name_or_path), f"bertarized_{os.path.basename(model_name_or_path)}"
        )

    if not os.path.isdir(target_model_path):
        shutil.copytree(model_name_or_path, target_model_path)
        print(f"\nCreated folder {target_model_path}")

    torch.save(pruned_model, os.path.join(target_model_path, "pytorch_model.bin"))
    print("\nPruned model saved! See you later!")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--pruning_method",
choices=["l0", "magnitude", "topK", "sigmoied_threshold"],
type=str,
required=True,
help=(
"Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,"
" sigmoied_threshold = Soft movement pruning)"
),
)
parser.add_argument(
"--threshold",
type=float,
required=False,
help=(
"For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model."
"For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared."
"Not needed for `l0`"
),
)
parser.add_argument(
"--model_name_or_path",
type=str,
required=True,
help="Folder containing the model that was previously fine-pruned",
)
parser.add_argument(
"--target_model_path",
default=None,
type=str,
required=False,
help="Folder containing the model that was previously fine-pruned",
)
    args = parser.parse_args()
main(args)
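# Hypothetical invocation (the script name is an assumption; the flags are the
# ones defined above):
#
#   python bertarize.py \
#       --pruning_method topK \
#       --threshold 0.10 \
#       --model_name_or_path ./serialization_dir/fine_pruned_model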
"""simple docstring"""
import unittest
from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
UpperCAmelCase__ = get_tests_dir("""fixtures/spiece.model""")
@require_sentencepiece
@require_tokenizers
class a ( _UpperCAmelCase , unittest.TestCase ):
_snake_case : List[Any] = DebertaVaTokenizer
_snake_case : str = DebertaVaTokenizerFast
_snake_case : Dict = True
_snake_case : List[Any] = True
def lowerCAmelCase_ ( self : Tuple ):
super().setUp()
# We have a SentencePiece fixture for testing
_UpperCAmelCase = DebertaVaTokenizer(A_ , unk_token="""<unk>""" )
tokenizer.save_pretrained(self.tmpdirname )
    def get_input_output_texts(self, tokenizer):
        input_text = "this is a test"
        output_text = "this is a test"
        return input_text, output_text

    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<pad>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "[PAD]")
        self.assertEqual(len(vocab_keys), 30001)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 30000)

    def test_do_lower_case(self):
        # fmt: off
        sequence = " \tHeLLo!how  \n Are yoU?  "
        tokens_target = ["▁hello", "!", "how", "▁are", "▁you", "?"]
        # fmt: on

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, do_lower_case=True)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, do_lower_case=True)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)

    @unittest.skip("There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.")
    def test_sentencepiece_tokenize_and_convert_tokens_to_string(self):
        pass

    @unittest.skip("There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.")
    def test_sentencepiece_tokenize_and_decode(self):
        pass
    def test_split_by_punct(self):
        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        tokens_target = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
        # fmt: on
        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, split_by_punct=True)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)
        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, split_by_punct=True)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)
    def test_do_lower_case_split_by_punct(self):
        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        tokens_target = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
        # fmt: on
        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, do_lower_case=True, split_by_punct=True)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)
        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, do_lower_case=True, split_by_punct=True)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)
    def test_do_lower_case_split_by_punct_false(self):
        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        tokens_target = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ]
        # fmt: on
        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, do_lower_case=True, split_by_punct=False)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)
        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, do_lower_case=True, split_by_punct=False)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)
    def test_do_lower_case_false_split_by_punct(self):
        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        tokens_target = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
        # fmt: on
        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, do_lower_case=False, split_by_punct=True)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)
        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, do_lower_case=False, split_by_punct=True)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)
    def test_do_lower_case_false_split_by_punct_false(self):
        # fmt: off
        sequence = " \tHeLLo!how \n Are yoU? "
        tokens_target = ["▁", "<unk>", "e", "<unk>", "o", "!", "how", "▁", "<unk>", "re", "▁yo", "<unk>", "?"]
        # fmt: on
        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, do_lower_case=False, split_by_punct=False)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)
        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, do_lower_case=False, split_by_punct=False)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)
    def test_rust_and_python_full_tokenizers(self):
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = "I was born in 92000, and this is falsé."
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, rust_tokens)
        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    def test_full_tokenizer(self):
        sequence = "This is a test"
        ids_target = [13, 1, 4398, 25, 21, 1289]
        tokens_target = ["▁", "T", "his", "▁is", "▁a", "▁test"]
        back_tokens_target = ["▁", "<unk>", "his", "▁is", "▁a", "▁test"]
        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, keep_accents=True)
        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, keep_accents=True)
        ids = tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, ids_target)
        tokens = tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, tokens_target)
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(back_tokens, back_tokens_target)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(rust_ids, ids_target)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(rust_tokens, tokens_target)
        rust_back_tokens = rust_tokenizer.convert_ids_to_tokens(rust_ids)
        self.assertListEqual(rust_back_tokens, back_tokens_target)
        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        ids_target = [13, 1, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9]
        tokens_target = ["▁", "I", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "é", ".", ]
        back_tokens_target = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ]
        # fmt: on
        ids = tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, ids_target)
        tokens = tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, tokens_target)
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(back_tokens, back_tokens_target)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(rust_ids, ids_target)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(rust_tokens, tokens_target)
        rust_back_tokens = rust_tokenizer.convert_ids_to_tokens(rust_ids)
        self.assertListEqual(rust_back_tokens, back_tokens_target)
    def test_sequence_builders(self):
        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB)
        text = tokenizer.encode("sequence builders")
        text_2 = tokenizer.encode("multi-sequence build")
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)
        self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id], encoded_sentence)
        self.assertEqual(
            [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [tokenizer.sep_token_id],
            encoded_pair,
        )
@slow
    def test_tokenizer_integration(self):
# fmt: off
_UpperCAmelCase = {"""input_ids""": [[1, 3_9867, 36, 1_9390, 486, 27, 3_5052, 8_1436, 18, 6_0685, 1225, 7, 3_5052, 8_1436, 18, 9367, 1_6899, 18, 1_5937, 53, 594, 773, 18, 1_6287, 3_0465, 36, 1_5937, 6, 4_1139, 38, 3_6979, 6_0763, 191, 6, 3_4132, 99, 6, 5_0538, 390, 4_3230, 6, 3_4132, 2779, 2_0850, 14, 699, 1072, 1194, 36, 382, 1_0901, 53, 7, 699, 1072, 2084, 36, 2_0422, 630, 53, 19, 105, 3049, 1896, 1053, 1_6899, 1506, 11, 3_7978, 4243, 7, 1237, 3_1869, 200, 1_6566, 654, 6, 3_5052, 8_1436, 7, 5_5630, 1_3593, 4, 2], [1, 26, 1_5011, 13, 667, 8, 1053, 18, 2_3611, 1237, 7_2356, 1_2820, 34, 10_4134, 1209, 35, 1_3313, 6627, 21, 202, 347, 7, 164, 2399, 11, 46, 4485, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 1232, 2864, 1_5785, 1_4951, 105, 5, 8581, 1250, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=_UpperCAmelCase, model_name="microsoft/deberta-v2-xlarge", revision="ad6e42c1532ddf3a15c39246b63f5559d558b670")
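
    # Illustrative manual parity check (mirrors what the tests above automate;
    # the checkpoint name is only an example):
    #
    #     slow = DebertaV2Tokenizer.from_pretrained("microsoft/deberta-v2-xlarge")
    #     fast = DebertaV2TokenizerFast.from_pretrained("microsoft/deberta-v2-xlarge")
    #     text = "I was born in 92000, and this is falsé."
    #     assert slow.encode(text) == fast.encode(text)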
| 705
|
"""simple docstring"""
from __future__ import annotations
def encode(plain: str) -> list[int]:
    """Map each lowercase letter to its position in the alphabet (a=1 ... z=26)."""
    return [ord(elem) - 96 for elem in plain]


def decode(encoded: list[int]) -> str:
    """Inverse of `encode`."""
    return "".join(chr(elem + 96) for elem in encoded)


def main() -> None:
    encoded = encode(input("-> ").strip().lower())
    print("Encoded: ", encoded)
    print("Decoded: ", decode(encoded))


if __name__ == "__main__":
    main()
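
# Worked example (letters map to 1..26, so only lowercase a-z round-trips):
#     encode("hello")            -> [8, 5, 12, 12, 15]
#     decode([8, 5, 12, 12, 15]) -> "hello"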
| 275
| 0
|
'''simple docstring'''
import io
import math
from typing import Dict, Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import convert_to_rgb, normalize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
get_image_size,
infer_channel_dimension_format,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_vision_available, logging
from ...utils.import_utils import requires_backends
if is_vision_available():
import textwrap
from PIL import Image, ImageDraw, ImageFont
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
    is_torch_greater_or_equal_than_1_11 = False

logger = logging.get_logger(__name__)

DEFAULT_FONT_PATH = "ybelkada/fonts"
def _check_torch_version():
    if is_torch_available() and not is_torch_greater_or_equal_than_1_11:
        raise ImportError(
            f"You are using torch=={torch.__version__}, but torch>=1.11.0 is required to use "
            "Pix2StructImageProcessor. Please upgrade torch."
        )
def torch_extract_patches(image_tensor, patch_height, patch_width):
    requires_backends(torch_extract_patches, ["torch"])
    _check_torch_version()
    image_tensor = image_tensor.unsqueeze(0)
    patches = torch.nn.functional.unfold(image_tensor, (patch_height, patch_width), stride=(patch_height, patch_width))
    patches = patches.reshape(image_tensor.size(0), image_tensor.size(1), patch_height, patch_width, -1)
    patches = patches.permute(0, 4, 2, 3, 1).reshape(
        image_tensor.size(2) // patch_height,
        image_tensor.size(3) // patch_width,
        image_tensor.size(1) * patch_height * patch_width,
    )
    return patches.unsqueeze(0)
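
# Shape sketch (illustrative): for a (3, 32, 32) image tensor and 16x16
# patches, the unfold above produces 4 patches and the function returns a
# tensor of shape (1, 32 // 16, 32 // 16, 3 * 16 * 16) == (1, 2, 2, 768).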
def render_text(
    text,
    text_size=36,
    text_color="black",
    background_color="white",
    left_padding=5,
    right_padding=5,
    top_padding=5,
    bottom_padding=5,
    font_bytes=None,
    font_path=None,
):
    requires_backends(render_text, "vision")
    # Add new lines so that each line is no more than 80 characters.
    wrapper = textwrap.TextWrapper(width=80)
    lines = wrapper.wrap(text=text)
    wrapped_text = "\n".join(lines)

    if font_bytes is not None and font_path is None:
        font = io.BytesIO(font_bytes)
    elif font_path is not None:
        font = font_path
    else:
        font = hf_hub_download(DEFAULT_FONT_PATH, "Arial.TTF")
    font = ImageFont.truetype(font, encoding="UTF-8", size=text_size)

    # Use a temporary canvas to determine the width and height in pixels when
    # rendering the text.
    temp_draw = ImageDraw.Draw(Image.new("RGB", (1, 1), background_color))
    _, _, text_width, text_height = temp_draw.textbbox((0, 0), wrapped_text, font)

    # Create the actual image with a bit of padding around the text.
    image_width = text_width + left_padding + right_padding
    image_height = text_height + top_padding + bottom_padding
    image = Image.new("RGB", (image_width, image_height), background_color)
    draw = ImageDraw.Draw(image)
    draw.text(xy=(left_padding, top_padding), text=wrapped_text, fill=text_color, font=font)
    return image
def render_header(image, header, **kwargs):
    requires_backends(render_header, "vision")

    # Convert to PIL image if necessary
    image = to_pil_image(image)

    header_image = render_text(header, **kwargs)
    new_width = max(header_image.width, image.width)
    new_height = int(image.height * (new_width / image.width))
    new_header_height = int(header_image.height * (new_width / header_image.width))

    new_image = Image.new("RGB", (new_width, new_height + new_header_height), "white")
    new_image.paste(header_image.resize((new_width, new_header_height)), (0, 0))
    new_image.paste(image.resize((new_width, new_height)), (0, new_header_height))

    # Convert back to the original framework if necessary
    new_image = to_numpy_array(new_image)
    if infer_channel_dimension_format(new_image) == ChannelDimension.LAST:
        new_image = to_channel_dimension_format(new_image, ChannelDimension.LAST)
    return new_image
class Pix2StructImageProcessor(BaseImageProcessor):
    model_input_names = ["flattened_patches"]

    def __init__(
        self,
        do_convert_rgb: bool = True,
        do_normalize: bool = True,
        patch_size: Dict[str, int] = None,
        max_patches: int = 2048,
        is_vqa: bool = False,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.patch_size = patch_size if patch_size is not None else {"height": 16, "width": 16}
        self.do_normalize = do_normalize
        self.do_convert_rgb = do_convert_rgb
        self.max_patches = max_patches
        self.is_vqa = is_vqa
    def extract_flattened_patches(self, image: np.ndarray, max_patches: int, patch_size: dict, **kwargs) -> np.ndarray:
        requires_backends(self.extract_flattened_patches, "torch")
        _check_torch_version()
        # convert to torch
        image = to_channel_dimension_format(image, ChannelDimension.FIRST)
        image = torch.from_numpy(image)
        patch_height, patch_width = patch_size["height"], patch_size["width"]
        image_height, image_width = get_image_size(image)
        # maximize scale s.t. the resulting grid of patches fits within max_patches
        scale = math.sqrt(max_patches * (patch_height / image_height) * (patch_width / image_width))
        num_feasible_rows = max(min(math.floor(scale * image_height / patch_height), max_patches), 1)
        num_feasible_cols = max(min(math.floor(scale * image_width / patch_width), max_patches), 1)
        resized_height = max(num_feasible_rows * patch_height, 1)
        resized_width = max(num_feasible_cols * patch_width, 1)
        image = torch.nn.functional.interpolate(
            image.unsqueeze(0),
            size=(resized_height, resized_width),
            mode="bilinear",
            align_corners=False,
            antialias=True,
        ).squeeze(0)
        # [1, rows, columns, patch_height * patch_width * image_channels]
        patches = torch_extract_patches(image, patch_height, patch_width)
        patches_shape = patches.shape
        rows = patches_shape[1]
        columns = patches_shape[2]
        depth = patches_shape[3]
        # [rows * columns, patch_height * patch_width * image_channels]
        patches = patches.reshape([rows * columns, depth])
        # [rows * columns, 1]
        row_ids = torch.arange(rows).reshape([rows, 1]).repeat(1, columns).reshape([rows * columns, 1])
        col_ids = torch.arange(columns).reshape([1, columns]).repeat(rows, 1).reshape([rows * columns, 1])
        # Offset by 1 so the ids do not contain zeros, which represent padding.
        row_ids += 1
        col_ids += 1
        # Prepare additional patch features.
        # [rows * columns, 1]
        row_ids = row_ids.to(torch.float32)
        col_ids = col_ids.to(torch.float32)
        # [rows * columns, 2 + patch_height * patch_width * image_channels]
        result = torch.cat([row_ids, col_ids, patches], -1)
        # [max_patches, 2 + patch_height * patch_width * image_channels]
        result = torch.nn.functional.pad(result, [0, 0, 0, max_patches - (rows * columns)]).float()
        result = to_numpy_array(result)
        return result
    def normalize(self, image: np.ndarray, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        if image.dtype == np.uint8:
            image = image.astype(np.float32)
        # take mean and std across the whole `image`
        mean = np.mean(image)
        std = np.std(image)
        adjusted_stddev = max(std, 1.0 / math.sqrt(np.prod(image.shape)))
        return normalize(image, mean=mean, std=adjusted_stddev, **kwargs)
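
    # Note: the method above is per-image standardization,
    #     output = (image - mean(image)) / max(std(image), 1 / sqrt(image.size)),
    # analogous to tf.image.per_image_standardization.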
    def preprocess(
        self,
        images: ImageInput,
        header_text: Optional[str] = None,
        do_convert_rgb: bool = None,
        do_normalize: Optional[bool] = None,
        max_patches: Optional[int] = None,
        patch_size: Optional[Dict[str, int]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> ImageInput:
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        patch_size = patch_size if patch_size is not None else self.patch_size
        max_patches = max_patches if max_patches is not None else self.max_patches
        is_vqa = self.is_vqa
        if kwargs.get("data_format", None) is not None:
            raise ValueError("data_format is not an accepted input as the outputs are flattened patches.")
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if is_vqa:
            if header_text is None:
                raise ValueError("A header text must be provided for VQA models.")
            font_bytes = kwargs.pop("font_bytes", None)
            font_path = kwargs.pop("font_path", None)
            if isinstance(header_text, str):
                header_text = [header_text] * len(images)
            images = [
                render_header(image, header_text[i], font_bytes=font_bytes, font_path=font_path)
                for i, image in enumerate(images)
            ]
        if do_normalize:
            images = [self.normalize(image=image) for image in images]
        # convert to torch tensor and permute
        images = [
            self.extract_flattened_patches(image=image, max_patches=max_patches, patch_size=patch_size)
            for image in images
        ]
        # create attention mask in numpy
        attention_masks = [(image.sum(axis=-1) != 0).astype(np.float32) for image in images]
        encoded_outputs = BatchFeature(
            data={"flattened_patches": images, "attention_mask": attention_masks}, tensor_type=return_tensors
        )
        return encoded_outputs
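
# Minimal usage sketch (illustrative; variable names are hypothetical):
#     processor = Pix2StructImageProcessor(max_patches=2048)
#     batch = processor(images=pil_image, return_tensors="pt")
#     batch["flattened_patches"].shape  # (1, 2048, 2 + 16 * 16 * 3) == (1, 2048, 770)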
| 41
|
from __future__ import annotations
def binary_search(a_list: list[int], item: int) -> bool:
    if len(a_list) == 0:
        return False
    midpoint = len(a_list) // 2
    if a_list[midpoint] == item:
        return True
    if item < a_list[midpoint]:
        return binary_search(a_list[:midpoint], item)
    else:
        return binary_search(a_list[midpoint + 1 :], item)
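
# Examples (the input list must already be sorted):
#     binary_search([1, 3, 5, 7], 5) -> True
#     binary_search([1, 3, 5, 7], 4) -> False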
if __name__ == "__main__":
snake_case__ : Tuple = input('Enter numbers separated by comma:\n').strip()
snake_case__ : str = [int(item.strip()) for item in user_input.split(',')]
snake_case__ : Union[str, Any] = int(input('Enter the number to be found in the list:\n').strip())
snake_case__ : str = '' if binary_search(sequence, target) else 'not '
print(F"""{target} was {not_str}found in {sequence}""")
| 408
| 0
|
import argparse
import os
import re
import zipfile
import torch
from transformers import AutoTokenizer, GPT2Config
def recursive_print(name, val, spaces=0):
    # Format the message.
    if name is None:
        msg = None
    else:
        fmt = "." * max(0, spaces - 2) + "# {:" + str(50 - spaces) + "s}"
        msg = fmt.format(name)

    # Print and recurse (if needed).
    if isinstance(val, dict):
        if msg is not None:
            print(msg)
        for k in val.keys():
            recursive_print(k, val[k], spaces + 2)
    elif isinstance(val, torch.Tensor):
        print(msg, ":", val.size())
    else:
        print(msg, ":", val)
def fix_query_key_value_ordering(param, checkpoint_version, num_splits, num_heads, hidden_size):
    input_shape = param.size()
    if checkpoint_version == 1.0:
        # version 1.0 stores [num_heads * hidden_size * num_splits, :]
        saved_shape = (num_heads, hidden_size, num_splits) + input_shape[1:]
        param = param.view(*saved_shape)
        param = param.transpose(0, 2)
        param = param.transpose(1, 2).contiguous()
    elif checkpoint_version >= 2.0:
        # other versions store [num_heads * num_splits * hidden_size, :]
        saved_shape = (num_heads, num_splits, hidden_size) + input_shape[1:]
        param = param.view(*saved_shape)
        param = param.transpose(0, 1).contiguous()
    param = param.view(*input_shape)
    return param
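
# Shape sketch (illustrative sizes): for checkpoint_version >= 2.0 a fused QKV
# weight of shape [num_heads * 3 * head_dim, hidden] is viewed as
# [num_heads, 3, head_dim, hidden], the first two axes are swapped, and the
# result is flattened back so the Q, K and V blocks become contiguous, which
# is the layout transformers' GPT-2 attention expects.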
def convert_megatron_checkpoint(args, input_state_dict, config):
    # The converted output model.
    output_state_dict = {}

    # old versions did not store training args
    ds_args = input_state_dict.get("args", None)
    if ds_args is not None:
        # do not make the user write a config file when the exact dimensions/sizes are already in the checkpoint
        # from pprint import pprint
        # pprint(vars(ds_args))
        config.vocab_size = ds_args.padded_vocab_size
        config.n_positions = ds_args.max_position_embeddings
        config.n_embd = ds_args.hidden_size
        config.n_layer = ds_args.num_layers
        config.n_head = ds_args.num_attention_heads
        config.n_inner = ds_args.ffn_hidden_size
        # pprint(config)

    # The number of heads.
    heads = config.n_head
    # The hidden_size per head.
    hidden_size_per_head = config.n_embd // config.n_head
    # Megatron-LM checkpoint version
    if "checkpoint_version" in input_state_dict.keys():
        checkpoint_version = input_state_dict["checkpoint_version"]
    else:
        checkpoint_version = 0.0

    # The model.
    model = input_state_dict["model"]
    # The language model.
    lm = model["language_model"]
    # The embeddings.
    embeddings = lm["embedding"]

    # The word embeddings.
    word_embeddings = embeddings["word_embeddings"]["weight"]
    # Truncate the embedding table to vocab_size rows.
    word_embeddings = word_embeddings[: config.vocab_size, :]
    output_state_dict["transformer.wte.weight"] = word_embeddings

    # The position embeddings.
    pos_embeddings = embeddings["position_embeddings"]["weight"]
    # Read the causal mask dimension (seqlen). [max_sequence_length, hidden_size]
    n_positions = pos_embeddings.size(0)
    if n_positions != config.n_positions:
        raise ValueError(
            f"pos_embeddings.max_sequence_length={n_positions} and config.n_positions={config.n_positions} don't match"
        )
    # Store the position embeddings.
    output_state_dict["transformer.wpe.weight"] = pos_embeddings

    # The transformer.
    transformer = lm["transformer"] if "transformer" in lm.keys() else lm["encoder"]

    # The regex to extract layer names.
    layer_re = re.compile(r"layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)")

    # The simple map of names for "automated" rules.
    megatron_to_transformers = {
        "attention.dense": ".attn.c_proj.",
        "self_attention.dense": ".attn.c_proj.",
        "mlp.dense_h_to_4h": ".mlp.c_fc.",
        "mlp.dense_4h_to_h": ".mlp.c_proj.",
    }

    # Extract the layers.
    for key, val in transformer.items():
        # Match the name.
        m = layer_re.match(key)

        # Stop if that's not a layer
        if m is None:
            break

        # The index of the layer.
        layer_idx = int(m.group(1))
        # The name of the operation.
        op_name = m.group(2)
        # Is it a weight or a bias?
        weight_or_bias = m.group(3)

        # The name of the layer.
        layer_name = f"transformer.h.{layer_idx}"

        # For layernorm(s), simply store the layer norm.
        if op_name.endswith("layernorm"):
            ln_name = "ln_1" if op_name.startswith("input") else "ln_2"
            output_state_dict[layer_name + "." + ln_name + "." + weight_or_bias] = val

        # Transpose the QKV matrix.
        elif (
            op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
        ) and weight_or_bias == "weight":
            # Insert a tensor of 1x1xDxD bias.
            causal_mask = torch.tril(torch.ones((n_positions, n_positions), dtype=torch.float16)).view(
                1, 1, n_positions, n_positions
            )
            output_state_dict[layer_name + ".attn.bias"] = causal_mask

            # Insert a "dummy" tensor for masked_bias.
            masked_bias = torch.tensor(-1e4, dtype=torch.float16)
            output_state_dict[layer_name + ".attn.masked_bias"] = masked_bias

            out_val = fix_query_key_value_ordering(val, checkpoint_version, 3, heads, hidden_size_per_head)
            # Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D.
            out_val = out_val.transpose(0, 1).contiguous()
            # Store.
            output_state_dict[layer_name + ".attn.c_attn.weight"] = out_val

        # Transpose the bias.
        elif (
            op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
        ) and weight_or_bias == "bias":
            out_val = fix_query_key_value_ordering(val, checkpoint_version, 3, heads, hidden_size_per_head)
            # Store. No change of shape.
            output_state_dict[layer_name + ".attn.c_attn.bias"] = out_val

        # Transpose the weights.
        elif weight_or_bias == "weight":
            out_name = megatron_to_transformers[op_name]
            output_state_dict[layer_name + out_name + "weight"] = val.transpose(0, 1)

        # Copy the bias.
        elif weight_or_bias == "bias":
            out_name = megatron_to_transformers[op_name]
            output_state_dict[layer_name + out_name + "bias"] = val

    # DEBUG.
    assert config.n_layer == layer_idx + 1

    # The final layernorm.
    output_state_dict["transformer.ln_f.weight"] = transformer["final_layernorm.weight"]
    output_state_dict["transformer.ln_f.bias"] = transformer["final_layernorm.bias"]

    # For LM head, transformers' wants the matrix to weight embeddings.
    output_state_dict["lm_head.weight"] = word_embeddings

    # It should be done!
    return output_state_dict
def main():
    # Create the argument parser.
    parser = argparse.ArgumentParser()
    parser.add_argument("--print-checkpoint-structure", action="store_true")
    parser.add_argument(
        "path_to_checkpoint",
        type=str,
        help="Path to the checkpoint file (.zip archive or direct .pt file)",
    )
    parser.add_argument(
        "--config_file",
        default="",
        type=str,
        help="An optional config json file describing the pre-trained model.",
    )
    args = parser.parse_args()

    # Extract the basename.
    basename = os.path.dirname(args.path_to_checkpoint)

    # Load the model.
    # the .zip is very optional, let's keep it for backward compatibility
    print(f"Extracting PyTorch state dictionary from {args.path_to_checkpoint}")
    if args.path_to_checkpoint.endswith(".zip"):
        with zipfile.ZipFile(args.path_to_checkpoint, "r") as checkpoint:
            with checkpoint.open("release/mp_rank_00/model_optim_rng.pt") as pytorch_dict:
                input_state_dict = torch.load(pytorch_dict, map_location="cpu")
    else:
        input_state_dict = torch.load(args.path_to_checkpoint, map_location="cpu")

    ds_args = input_state_dict.get("args", None)

    # Read the config, or default to the model released by NVIDIA.
    if args.config_file == "":
        if ds_args is not None:
            if ds_args.bias_gelu_fusion:
                activation_function = "gelu_fast"
            elif ds_args.openai_gelu:
                activation_function = "gelu_new"
            else:
                activation_function = "gelu"
        else:
            # in the very early days this used to be "gelu_new"
            activation_function = "gelu_new"
        # Spell out all parameters in case the defaults change.
        config = GPT2Config(
            vocab_size=50_257,
            n_positions=1_024,
            n_embd=1_024,
            n_layer=24,
            n_head=16,
            n_inner=4_096,
            activation_function=activation_function,
            resid_pdrop=0.1,
            embd_pdrop=0.1,
            attn_pdrop=0.1,
            layer_norm_epsilon=1e-5,
            initializer_range=0.02,
            summary_type="cls_index",
            summary_use_proj=True,
            summary_activation=None,
            summary_proj_to_labels=True,
            summary_first_dropout=0.1,
            scale_attn_weights=True,
            use_cache=True,
            bos_token_id=50_256,
            eos_token_id=50_256,
        )
    else:
        config = GPT2Config.from_json_file(args.config_file)
    config.architectures = ["GPT2LMHeadModel"]

    # Convert.
    print("Converting")
    output_state_dict = convert_megatron_checkpoint(args, input_state_dict, config)

    # Print the structure of converted state dict.
    if args.print_checkpoint_structure:
        recursive_print(None, output_state_dict)

    # Add tokenizer class info to config
    # see https://github.com/huggingface/transformers/issues/13906)
    if ds_args is not None:
        tokenizer_type = ds_args.tokenizer_type
        if tokenizer_type == "GPT2BPETokenizer":
            tokenizer_model_name = "gpt2"
        elif tokenizer_type == "PretrainedFromHF":
            tokenizer_model_name = ds_args.tokenizer_name_or_path
        else:
            raise ValueError(f"Unrecognized tokenizer_type {tokenizer_type}")
    else:
        tokenizer_model_name = "gpt2"

    tokenizer = AutoTokenizer.from_pretrained(tokenizer_model_name)
    tokenizer_class = type(tokenizer).__name__
    config.tokenizer_class = tokenizer_class

    # Store the config to file.
    print("Saving config")
    config.save_pretrained(basename)

    # Save tokenizer based on args
    print(f"Adding {tokenizer_class} tokenizer files")
    tokenizer.save_pretrained(basename)

    # Store the state_dict to file.
    output_checkpoint_file = os.path.join(basename, "pytorch_model.bin")
    print(f'Saving checkpoint to "{output_checkpoint_file}"')
    torch.save(output_state_dict, output_checkpoint_file)
####################################################################################################
if __name__ == "__main__":
main()
####################################################################################################
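
# Usage sketch (paths and script file name are illustrative):
#     python convert_megatron_gpt2_checkpoint.py --print-checkpoint-structure \
#         path/to/checkpoint.zip
# The converted config.json, tokenizer files and pytorch_model.bin are written
# next to the checkpoint.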
| 705
|
from typing import TYPE_CHECKING
from ...utils import _LazyModule
UpperCAmelCase_ = {"""tokenization_byt5""": ["""ByT5Tokenizer"""]}
if TYPE_CHECKING:
from .tokenization_byta import ByTaTokenizer
else:
import sys
UpperCAmelCase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
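
# With the lazy module in place, `ByT5Tokenizer` is only imported from
# `tokenization_byt5` on first attribute access, which keeps importing the
# package cheap.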
| 541
| 0
|