"""Project Euler 21: sum all amicable numbers below the limit."""
from math import sqrt


def sum_of_divisors(n: int) -> int:
    """Return the sum of the proper divisors of ``n``."""
    total = 0
    for i in range(1, int(sqrt(n) + 1)):
        if n % i == 0 and i != sqrt(n):
            total += i + n // i
        elif i == sqrt(n):
            total += i
    return total - n


def solution(limit: int = 10_000) -> int:
    """Sum every amicable number strictly below ``limit``."""
    total = sum(
        i
        for i in range(1, limit)
        if sum_of_divisors(sum_of_divisors(i)) == i and sum_of_divisors(i) != i
    )
    return total


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
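
# Editorial sanity check (not part of the original script): 220 and 284 form
# the classic amicable pair, so `solution` counts exactly those two below 300.
assert sum_of_divisors(220) == 284
assert sum_of_divisors(284) == 220
assert solution(300) == 220 + 284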
ENERGY_CONVERSION = {
    "joule": 1.0,
    "kilojoule": 1_000,
    "megajoule": 1_000_000,
    "gigajoule": 1_000_000_000,
    "wattsecond": 1.0,
    "watthour": 3_600,
    "kilowatthour": 3_600_000,
    "newtonmeter": 1.0,
    "calorie_nutr": 4_186.8,
    "kilocalorie_nutr": 4_186_800.00,
    "electronvolt": 1.602_176_634e-19,
    "britishthermalunit_it": 1_055.055_85,
    "footpound": 1.355_818,
}


def energy_conversion(from_type: str, to_type: str, value: float) -> float:
    """Convert ``value`` between any two energy units listed in ENERGY_CONVERSION."""
    if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
        msg = (
            f"Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"
            f"Valid values are: {', '.join(ENERGY_CONVERSION)}"
        )
        raise ValueError(msg)
    return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
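
# Example conversions (an editorial addition; the values follow directly from
# the table above, every conversion passing through the joule base unit):
#
#     >>> energy_conversion("joule", "kilojoule", 1_000)
#     1.0
#     >>> energy_conversion("kilowatthour", "joule", 1)
#     3600000.0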
"""Utilities for the token-classification (NER) fine-tuning examples."""
import logging
import os
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union

from filelock import FileLock

from transformers import PreTrainedTokenizer, is_tf_available, is_torch_available


logger = logging.getLogger(__name__)


@dataclass
class InputExample:
    """A single training/test example for token classification."""

    guid: str
    words: List[str]
    labels: Optional[List[str]]


@dataclass
class InputFeatures:
    """A single set of features of data; property names mirror the model input names."""

    input_ids: List[int]
    attention_mask: List[int]
    token_type_ids: Optional[List[int]] = None
    label_ids: Optional[List[int]] = None


class Split(Enum):
    train = "train"
    dev = "dev"
    test = "test"


class TokenClassificationTask:
    @staticmethod
    def read_examples_from_file(data_dir: str, mode: Union[Split, str]) -> List[InputExample]:
        raise NotImplementedError

    @staticmethod
    def get_labels(path: str) -> List[str]:
        raise NotImplementedError

    @staticmethod
    def convert_examples_to_features(
        examples: List[InputExample],
        label_list: List[str],
        max_seq_length: int,
        tokenizer: PreTrainedTokenizer,
        cls_token_at_end=False,
        cls_token="[CLS]",
        cls_token_segment_id=1,
        sep_token="[SEP]",
        sep_token_extra=False,
        pad_on_left=False,
        pad_token=0,
        pad_token_segment_id=0,
        pad_token_label_id=-100,
        sequence_a_segment_id=0,
        mask_padding_with_zero=True,
    ) -> List[InputFeatures]:
        """Tokenize the examples and build padded, truncated `InputFeatures`."""
        label_map = {label: i for i, label in enumerate(label_list)}

        features = []
        for ex_index, example in enumerate(examples):
            if ex_index % 10_000 == 0:
                logger.info("Writing example %d of %d", ex_index, len(examples))

            tokens = []
            label_ids = []
            for word, label in zip(example.words, example.labels):
                word_tokens = tokenizer.tokenize(word)

                # bert-base-multilingual-cased sometimes outputs "nothing" ([]) when calling tokenize with just a space.
                if len(word_tokens) > 0:
                    tokens.extend(word_tokens)
                    # Use the real label id for the first token of the word, and padding ids for the remaining tokens
                    label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(word_tokens) - 1))

            # Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa.
            special_tokens_count = tokenizer.num_special_tokens_to_add()
            if len(tokens) > max_seq_length - special_tokens_count:
                tokens = tokens[: (max_seq_length - special_tokens_count)]
                label_ids = label_ids[: (max_seq_length - special_tokens_count)]

            # The convention in BERT is:
            # (a) For sequence pairs:
            #  tokens:   [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
            #  type_ids:   0   0  0    0    0     0       0   0   1  1  1  1   1   1
            # (b) For single sequences:
            #  tokens:   [CLS] the dog is hairy . [SEP]
            #  type_ids:   0   0   0   0  0     0   0
            #
            # Where "type_ids" are used to indicate whether this is the first
            # sequence or the second sequence. The embedding vectors for `type=0` and
            # `type=1` were learned during pre-training and are added to the wordpiece
            # embedding vector (and position vector). This is not *strictly* necessary
            # since the [SEP] token unambiguously separates the sequences, but it makes
            # it easier for the model to learn the concept of sequences.
            #
            # For classification tasks, the first vector (corresponding to [CLS]) is
            # used as the "sentence vector". Note that this only makes sense because
            # the entire model is fine-tuned.
            tokens += [sep_token]
            label_ids += [pad_token_label_id]
            if sep_token_extra:
                # roberta uses an extra separator b/w pairs of sentences
                tokens += [sep_token]
                label_ids += [pad_token_label_id]
            segment_ids = [sequence_a_segment_id] * len(tokens)

            if cls_token_at_end:
                tokens += [cls_token]
                label_ids += [pad_token_label_id]
                segment_ids += [cls_token_segment_id]
            else:
                tokens = [cls_token] + tokens
                label_ids = [pad_token_label_id] + label_ids
                segment_ids = [cls_token_segment_id] + segment_ids

            input_ids = tokenizer.convert_tokens_to_ids(tokens)

            # The mask has 1 for real tokens and 0 for padding tokens. Only real
            # tokens are attended to.
            input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)

            # Zero-pad up to the sequence length.
            padding_length = max_seq_length - len(input_ids)
            if pad_on_left:
                input_ids = ([pad_token] * padding_length) + input_ids
                input_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
                segment_ids = ([pad_token_segment_id] * padding_length) + segment_ids
                label_ids = ([pad_token_label_id] * padding_length) + label_ids
            else:
                input_ids += [pad_token] * padding_length
                input_mask += [0 if mask_padding_with_zero else 1] * padding_length
                segment_ids += [pad_token_segment_id] * padding_length
                label_ids += [pad_token_label_id] * padding_length

            assert len(input_ids) == max_seq_length
            assert len(input_mask) == max_seq_length
            assert len(segment_ids) == max_seq_length
            assert len(label_ids) == max_seq_length

            if ex_index < 5:
                logger.info("*** Example ***")
                logger.info("guid: %s", example.guid)
                logger.info("tokens: %s", " ".join([str(x) for x in tokens]))
                logger.info("input_ids: %s", " ".join([str(x) for x in input_ids]))
                logger.info("input_mask: %s", " ".join([str(x) for x in input_mask]))
                logger.info("segment_ids: %s", " ".join([str(x) for x in segment_ids]))
                logger.info("label_ids: %s", " ".join([str(x) for x in label_ids]))

            if "token_type_ids" not in tokenizer.model_input_names:
                segment_ids = None

            features.append(
                InputFeatures(
                    input_ids=input_ids, attention_mask=input_mask, token_type_ids=segment_ids, label_ids=label_ids
                )
            )
        return features


if is_torch_available():
    import torch
    from torch import nn
    from torch.utils.data import Dataset

    class TokenClassificationDataset(Dataset):
        features: List[InputFeatures]
        # Use the cross entropy ignore_index as the padding label id so that
        # only real label ids contribute to the loss later.
        pad_token_label_id: int = nn.CrossEntropyLoss().ignore_index

        def __init__(
            self,
            token_classification_task: TokenClassificationTask,
            data_dir: str,
            tokenizer: PreTrainedTokenizer,
            labels: List[str],
            model_type: str,
            max_seq_length: Optional[int] = None,
            overwrite_cache=False,
            mode: Split = Split.train,
        ):
            # Load data features from cache or dataset file
            cached_features_file = os.path.join(
                data_dir,
                "cached_{}_{}_{}".format(mode.value, tokenizer.__class__.__name__, str(max_seq_length)),
            )

            # Make sure only the first process in distributed training processes the dataset,
            # and the others will use the cache.
            lock_path = cached_features_file + ".lock"
            with FileLock(lock_path):
                if os.path.exists(cached_features_file) and not overwrite_cache:
                    logger.info(f"Loading features from cached file {cached_features_file}")
                    self.features = torch.load(cached_features_file)
                else:
                    logger.info(f"Creating features from dataset file at {data_dir}")
                    examples = token_classification_task.read_examples_from_file(data_dir, mode)
                    # TODO clean up all this to leverage built-in features of tokenizers
                    self.features = token_classification_task.convert_examples_to_features(
                        examples,
                        labels,
                        max_seq_length,
                        tokenizer,
                        cls_token_at_end=bool(model_type in ["xlnet"]),
                        cls_token=tokenizer.cls_token,
                        cls_token_segment_id=2 if model_type in ["xlnet"] else 0,
                        sep_token=tokenizer.sep_token,
                        sep_token_extra=False,
                        pad_on_left=bool(tokenizer.padding_side == "left"),
                        pad_token=tokenizer.pad_token_id,
                        pad_token_segment_id=tokenizer.pad_token_type_id,
                        pad_token_label_id=self.pad_token_label_id,
                    )
                    logger.info(f"Saving features into cached file {cached_features_file}")
                    torch.save(self.features, cached_features_file)

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]


if is_tf_available():
    import tensorflow as tf

    class TFTokenClassificationDataset:
        features: List[InputFeatures]
        # Use the cross entropy ignore_index as the padding label id so that
        # only real label ids contribute to the loss later.
        pad_token_label_id: int = -100

        def __init__(
            self,
            token_classification_task: TokenClassificationTask,
            data_dir: str,
            tokenizer: PreTrainedTokenizer,
            labels: List[str],
            model_type: str,
            max_seq_length: Optional[int] = None,
            overwrite_cache=False,
            mode: Split = Split.train,
        ):
            examples = token_classification_task.read_examples_from_file(data_dir, mode)
            # TODO clean up all this to leverage built-in features of tokenizers
            self.features = token_classification_task.convert_examples_to_features(
                examples,
                labels,
                max_seq_length,
                tokenizer,
                cls_token_at_end=bool(model_type in ["xlnet"]),
                cls_token=tokenizer.cls_token,
                cls_token_segment_id=2 if model_type in ["xlnet"] else 0,
                sep_token=tokenizer.sep_token,
                sep_token_extra=False,
                pad_on_left=bool(tokenizer.padding_side == "left"),
                pad_token=tokenizer.pad_token_id,
                pad_token_segment_id=tokenizer.pad_token_type_id,
                pad_token_label_id=self.pad_token_label_id,
            )

            def gen():
                for ex in self.features:
                    if ex.token_type_ids is None:
                        yield (
                            {"input_ids": ex.input_ids, "attention_mask": ex.attention_mask},
                            ex.label_ids,
                        )
                    else:
                        yield (
                            {
                                "input_ids": ex.input_ids,
                                "attention_mask": ex.attention_mask,
                                "token_type_ids": ex.token_type_ids,
                            },
                            ex.label_ids,
                        )

            if "token_type_ids" not in tokenizer.model_input_names:
                self.dataset = tf.data.Dataset.from_generator(
                    gen,
                    ({"input_ids": tf.int32, "attention_mask": tf.int32}, tf.int64),
                    (
                        {"input_ids": tf.TensorShape([None]), "attention_mask": tf.TensorShape([None])},
                        tf.TensorShape([None]),
                    ),
                )
            else:
                self.dataset = tf.data.Dataset.from_generator(
                    gen,
                    ({"input_ids": tf.int32, "attention_mask": tf.int32, "token_type_ids": tf.int32}, tf.int64),
                    (
                        {
                            "input_ids": tf.TensorShape([None]),
                            "attention_mask": tf.TensorShape([None]),
                            "token_type_ids": tf.TensorShape([None]),
                        },
                        tf.TensorShape([None]),
                    ),
                )

        def get_dataset(self):
            self.dataset = self.dataset.apply(tf.data.experimental.assert_cardinality(len(self.features)))
            return self.dataset

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]
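
# A minimal sketch of how these pieces fit together (the `MyNERTask` subclass,
# the data directory and the label set are hypothetical, not part of this module):
#
#     from transformers import AutoTokenizer
#
#     class MyNERTask(TokenClassificationTask):
#         @staticmethod
#         def read_examples_from_file(data_dir, mode):
#             ...  # parse CoNLL-style files into InputExample objects
#
#         @staticmethod
#         def get_labels(path):
#             return ["O", "B-PER", "I-PER"]
#
#     tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
#     dataset = TokenClassificationDataset(
#         token_classification_task=MyNERTask(),
#         data_dir="./data",
#         tokenizer=tokenizer,
#         labels=MyNERTask.get_labels(None),
#         model_type="bert",
#         max_seq_length=128,
#     )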
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import Audio, Features, Value
from .base import TaskTemplate


@dataclass(frozen=True)
class AutomaticSpeechRecognition(TaskTemplate):
    task: str = field(default="automatic-speech-recognition", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"audio": Audio()})
    label_schema: ClassVar[Features] = Features({"transcription": Value("string")})
    audio_column: str = "audio"
    transcription_column: str = "transcription"

    def align_with_features(self, features):
        if self.audio_column not in features:
            raise ValueError(f"Column {self.audio_column} is not present in features.")
        if not isinstance(features[self.audio_column], Audio):
            raise ValueError(f"Column {self.audio_column} is not an Audio type.")
        task_template = copy.deepcopy(self)
        input_schema = self.input_schema.copy()
        input_schema["audio"] = features[self.audio_column]
        task_template.__dict__["input_schema"] = input_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.audio_column: "audio", self.transcription_column: "transcription"}
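
# Illustrative use (an editorial sketch; `Features`, `Audio` and `Value` come
# from the surrounding `datasets` library):
#
#     features = Features({"audio": Audio(sampling_rate=16_000), "transcription": Value("string")})
#     task = AutomaticSpeechRecognition()
#     aligned = task.align_with_features(features)
#     # aligned.input_schema["audio"] now carries the 16 kHz sampling rate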
"""Viterbi algorithm: most likely hidden-state path for an observation sequence."""
from typing import Any


def viterbi(
    observations_space: list,
    states_space: list,
    initial_probabilities: dict,
    transition_probabilities: dict,
    emission_probabilities: dict,
) -> list:
    """Return the most likely sequence of hidden states for the observations."""
    _validation(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    # Creates data structures and fill initial step
    probabilities: dict = {}
    pointers: dict = {}
    for state in states_space:
        observation = observations_space[0]
        probabilities[(state, observation)] = (
            initial_probabilities[state] * emission_probabilities[state][observation]
        )
        pointers[(state, observation)] = None

    # Fills the data structure with the probabilities of
    # different transitions and pointers to previous states
    for o in range(1, len(observations_space)):
        observation = observations_space[o]
        prior_observation = observations_space[o - 1]
        for state in states_space:
            # Calculates the argmax for probability function
            arg_max = ""
            max_probability = -1
            for k_state in states_space:
                probability = (
                    probabilities[(k_state, prior_observation)]
                    * transition_probabilities[k_state][state]
                    * emission_probabilities[state][observation]
                )
                if probability > max_probability:
                    max_probability = probability
                    arg_max = k_state

            # Update probabilities and pointers dicts
            probabilities[(state, observation)] = (
                probabilities[(arg_max, prior_observation)]
                * transition_probabilities[arg_max][state]
                * emission_probabilities[state][observation]
            )
            pointers[(state, observation)] = arg_max

    # The final observation
    final_observation = observations_space[len(observations_space) - 1]

    # argmax for given final observation
    arg_max = ""
    max_probability = -1
    for k_state in states_space:
        probability = probabilities[(k_state, final_observation)]
        if probability > max_probability:
            max_probability = probability
            arg_max = k_state
    last_state = arg_max

    # Process pointers backwards
    previous = last_state
    result = []
    for o in range(len(observations_space) - 1, -1, -1):
        result.append(previous)
        previous = pointers[previous, observations_space[o]]
    result.reverse()

    return result


def _validation(
    observations_space: Any,
    states_space: Any,
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    _validate_not_empty(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    _validate_lists(observations_space, states_space)
    _validate_dicts(initial_probabilities, transition_probabilities, emission_probabilities)


def _validate_not_empty(
    observations_space: Any,
    states_space: Any,
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    """Raise if any of the parameters is empty or falsy."""
    if not all(
        [
            observations_space,
            states_space,
            initial_probabilities,
            transition_probabilities,
            emission_probabilities,
        ]
    ):
        raise ValueError("There's an empty parameter")


def _validate_lists(observations_space: Any, states_space: Any) -> None:
    _validate_list(observations_space, "observations_space")
    _validate_list(states_space, "states_space")


def _validate_list(_object: Any, var_name: str) -> None:
    if not isinstance(_object, list):
        msg = f"{var_name} must be a list"
        raise ValueError(msg)
    else:
        for x in _object:
            if not isinstance(x, str):
                msg = f"{var_name} must be a list of strings"
                raise ValueError(msg)


def _validate_dicts(
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    _validate_dict(initial_probabilities, "initial_probabilities", float)
    _validate_nested_dict(transition_probabilities, "transition_probabilities")
    _validate_nested_dict(emission_probabilities, "emission_probabilities")


def _validate_nested_dict(_object: Any, var_name: str) -> None:
    _validate_dict(_object, var_name, dict)
    for x in _object.values():
        _validate_dict(x, var_name, float, True)


def _validate_dict(_object: Any, var_name: str, value_type: type, nested: bool = False) -> None:
    if not isinstance(_object, dict):
        msg = f"{var_name} must be a dict"
        raise ValueError(msg)
    if not all(isinstance(x, str) for x in _object):
        msg = f"{var_name} all keys must be strings"
        raise ValueError(msg)
    if not all(isinstance(x, value_type) for x in _object.values()):
        nested_text = "nested dictionary " if nested else ""
        msg = f"{var_name} {nested_text}all values must be {value_type.__name__}"
        raise ValueError(msg)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
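
# A small worked example (an editorial addition; the classic two-state
# health/observation HMM, values chosen for illustration):
#
#     observations = ["normal", "cold", "dizzy"]
#     states = ["healthy", "sick"]
#     start_p = {"healthy": 0.6, "sick": 0.4}
#     trans_p = {
#         "healthy": {"healthy": 0.7, "sick": 0.3},
#         "sick": {"healthy": 0.4, "sick": 0.6},
#     }
#     emit_p = {
#         "healthy": {"normal": 0.5, "cold": 0.4, "dizzy": 0.1},
#         "sick": {"normal": 0.1, "cold": 0.3, "dizzy": 0.6},
#     }
#     print(viterbi(observations, states, start_p, trans_p, emit_p))
#     # ['healthy', 'healthy', 'sick']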
"""BridgeTower model configuration."""
import copy
import os
from typing import Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "BridgeTower/bridgetower-base": "https://huggingface.co/BridgeTower/bridgetower-base/blob/main/config.json",
    "BridgeTower/bridgetower-base-itm-mlm": (
        "https://huggingface.co/BridgeTower/bridgetower-base-itm-mlm/blob/main/config.json"
    ),
}


class BridgeTowerVisionConfig(PretrainedConfig):
    model_type = "bridgetower_vision_model"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_channels=3,
        patch_size=16,
        image_size=288,
        initializer_factor=1,
        layer_norm_eps=1e-05,
        stop_gradient=False,
        share_layernorm=True,
        remove_last_layer=False,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.stop_gradient = stop_gradient
        self.share_layernorm = share_layernorm
        self.remove_last_layer = remove_last_layer

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        if config_dict.get("model_type") == "bridgetower":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class BridgeTowerTextConfig(PretrainedConfig):
    model_type = "bridgetower_text_model"

    def __init__(
        self,
        vocab_size=50_265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        initializer_factor=1,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=514,
        type_vocab_size=1,
        layer_norm_eps=1e-05,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.initializer_factor = initializer_factor
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        if config_dict.get("model_type") == "bridgetower":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class BridgeTowerConfig(PretrainedConfig):
    model_type = "bridgetower"

    def __init__(
        self,
        share_cross_modal_transformer_layers=True,
        hidden_act="gelu",
        hidden_size=768,
        initializer_factor=1,
        layer_norm_eps=1e-05,
        share_link_tower_layers=False,
        link_tower_type="add",
        num_attention_heads=12,
        num_hidden_layers=6,
        tie_word_embeddings=False,
        init_layernorm_from_vision_encoder=False,
        text_config=None,
        vision_config=None,
        **kwargs,
    ):
        # TODO: remove this once the Hub files are updated.
        _ = kwargs.pop("text_config_dict", None)
        _ = kwargs.pop("vision_config_dict", None)

        super().__init__(**kwargs)
        self.share_cross_modal_transformer_layers = share_cross_modal_transformer_layers
        self.hidden_act = hidden_act
        self.hidden_size = hidden_size
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.share_link_tower_layers = share_link_tower_layers
        self.link_tower_type = link_tower_type
        self.num_attention_heads = num_attention_heads
        self.num_hidden_layers = num_hidden_layers
        self.tie_word_embeddings = tie_word_embeddings
        self.init_layernorm_from_vision_encoder = init_layernorm_from_vision_encoder

        if text_config is None:
            text_config = {}
            logger.info("`text_config` is `None`. Initializing the `BridgeTowerTextConfig` with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("`vision_config` is `None`. Initializing the `BridgeTowerVisionConfig` with default values.")

        self.text_config = BridgeTowerTextConfig(**text_config)
        self.vision_config = BridgeTowerVisionConfig(**vision_config)

    @classmethod
    def from_text_vision_configs(
        cls, text_config: BridgeTowerTextConfig, vision_config: BridgeTowerVisionConfig, **kwargs
    ):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
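
# Sketch of composing the three configs (an editorial addition, mirroring the
# `from_text_vision_configs` classmethod above):
#
#     text_config = BridgeTowerTextConfig(num_hidden_layers=6)
#     vision_config = BridgeTowerVisionConfig(num_hidden_layers=6)
#     config = BridgeTowerConfig.from_text_vision_configs(text_config, vision_config)
#     config.to_dict()["text_config"]["num_hidden_layers"]  # 6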
"""Exponential Linear Unit (ELU) activation, implemented with NumPy."""
import numpy as np


def exponential_linear_unit(vector: np.ndarray, alpha: float) -> np.ndarray:
    """Apply ELU element-wise: x for x > 0, alpha * (exp(x) - 1) otherwise."""
    return np.where(vector > 0, vector, (alpha * (np.exp(vector) - 1)))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
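
# Quick numeric check (an editorial addition; negative inputs saturate toward
# -alpha, positive inputs pass through unchanged):
#
#     >>> exponential_linear_unit(np.array([2.3, 0.6, -2.0, -3.8]), alpha=0.3)
#     array([ 2.3       ,  0.6       , -0.25939942, -0.29328877])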
def solution(limit: int = 1_000_000) -> int:
    """Project Euler 72: count the reduced proper fractions with denominator
    <= limit, i.e. the sum of Euler's totient phi(n) for 2 <= n <= limit."""
    primes = set(range(3, limit, 2))
    primes.add(2)
    for p in range(3, limit, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, limit, p)))

    phi = [float(n) for n in range(limit + 1)]

    for p in primes:
        for n in range(p, limit + 1, p):
            phi[n] *= 1 - 1 / p

    return int(sum(phi[2:]))


if __name__ == "__main__":
    print(f"{solution() = }")
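
# Hand-checkable case (an editorial note): phi(2..8) = 1, 2, 2, 4, 2, 6, 4,
# which sums to 21, matching the worked example from Project Euler problem 72
# (21 reduced proper fractions with denominator <= 8).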
"""PyTorch ResNet model."""
from typing import Optional

import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss

from ...activations import ACT2FN
from ...modeling_outputs import (
    BackboneOutput,
    BaseModelOutputWithNoAttention,
    BaseModelOutputWithPoolingAndNoAttention,
    ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import (
    add_code_sample_docstrings,
    add_start_docstrings,
    add_start_docstrings_to_model_forward,
    logging,
    replace_return_docstrings,
)
from ...utils.backbone_utils import BackboneMixin
from .configuration_resnet import ResNetConfig


logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "ResNetConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "microsoft/resnet-50"
_EXPECTED_OUTPUT_SHAPE = [1, 2048, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "microsoft/resnet-50"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tiger cat"

RESNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "microsoft/resnet-50",
    # See all resnet models at https://huggingface.co/models?filter=resnet
]


class ResNetConvLayer(nn.Module):
    def __init__(
        self, in_channels: int, out_channels: int, kernel_size: int = 3, stride: int = 1, activation: str = "relu"
    ):
        super().__init__()
        self.convolution = nn.Conv2d(
            in_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=kernel_size // 2, bias=False
        )
        self.normalization = nn.BatchNorm2d(out_channels)
        self.activation = ACT2FN[activation] if activation is not None else nn.Identity()

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = self.convolution(input)
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state


class ResNetEmbeddings(nn.Module):
    """ResNet embeddings (stem): one aggressive convolution followed by max pooling."""

    def __init__(self, config: ResNetConfig):
        super().__init__()
        self.embedder = ResNetConvLayer(
            config.num_channels, config.embedding_size, kernel_size=7, stride=2, activation=config.hidden_act
        )
        self.pooler = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.num_channels = config.num_channels

    def forward(self, pixel_values: Tensor) -> Tensor:
        num_channels = pixel_values.shape[1]
        if num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
            )
        embedding = self.embedder(pixel_values)
        embedding = self.pooler(embedding)
        return embedding


class ResNetShortCut(nn.Module):
    """ResNet shortcut, used to project the residual features to the correct size."""

    def __init__(self, in_channels: int, out_channels: int, stride: int = 2):
        super().__init__()
        self.convolution = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, bias=False)
        self.normalization = nn.BatchNorm2d(out_channels)

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = self.convolution(input)
        hidden_state = self.normalization(hidden_state)
        return hidden_state


class ResNetBasicLayer(nn.Module):
    """A classic ResNet residual layer composed of two 3x3 convolutions."""

    def __init__(self, in_channels: int, out_channels: int, stride: int = 1, activation: str = "relu"):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        self.shortcut = (
            ResNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            ResNetConvLayer(in_channels, out_channels, stride=stride),
            ResNetConvLayer(out_channels, out_channels, activation=None),
        )
        self.activation = ACT2FN[activation]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state


class ResNetBottleNeckLayer(nn.Module):
    """A ResNet bottleneck layer: the input is reduced by `reduction`, transformed
    with a 3x3 convolution, then expanded back to `out_channels`."""

    def __init__(
        self, in_channels: int, out_channels: int, stride: int = 1, activation: str = "relu", reduction: int = 4
    ):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        reduces_channels = out_channels // reduction
        self.shortcut = (
            ResNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            ResNetConvLayer(in_channels, reduces_channels, kernel_size=1),
            ResNetConvLayer(reduces_channels, reduces_channels, stride=stride),
            ResNetConvLayer(reduces_channels, out_channels, kernel_size=1, activation=None),
        )
        self.activation = ACT2FN[activation]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state


class ResNetStage(nn.Module):
    """A ResNet stage composed of stacked layers."""

    def __init__(
        self,
        config: ResNetConfig,
        in_channels: int,
        out_channels: int,
        stride: int = 2,
        depth: int = 2,
    ):
        super().__init__()
        layer = ResNetBottleNeckLayer if config.layer_type == "bottleneck" else ResNetBasicLayer
        self.layers = nn.Sequential(
            # downsampling is done in the first layer with stride of 2
            layer(in_channels, out_channels, stride=stride, activation=config.hidden_act),
            *[layer(out_channels, out_channels, activation=config.hidden_act) for _ in range(depth - 1)],
        )

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = input
        for layer in self.layers:
            hidden_state = layer(hidden_state)
        return hidden_state


class ResNetEncoder(nn.Module):
    def __init__(self, config: ResNetConfig):
        super().__init__()
        self.stages = nn.ModuleList([])
        # based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input
        self.stages.append(
            ResNetStage(
                config,
                config.embedding_size,
                config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0],
            )
        )
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for (in_channels, out_channels), depth in zip(in_out_channels, config.depths[1:]):
            self.stages.append(ResNetStage(config, in_channels, out_channels, depth=depth))

    def forward(
        self, hidden_state: Tensor, output_hidden_states: bool = False, return_dict: bool = True
    ) -> BaseModelOutputWithNoAttention:
        hidden_states = () if output_hidden_states else None

        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)

            hidden_state = stage_module(hidden_state)

        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)

        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)

        return BaseModelOutputWithNoAttention(
            last_hidden_state=hidden_state,
            hidden_states=hidden_states,
        )


class ResNetPreTrainedModel(PreTrainedModel):
    config_class = ResNetConfig
    base_model_prefix = "resnet"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            nn.init.kaiming_normal_(module.weight, mode="fan_out", nonlinearity="relu")
        elif isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
            nn.init.constant_(module.weight, 1)
            nn.init.constant_(module.bias, 0)

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, ResNetEncoder):
            module.gradient_checkpointing = value


RESNET_START_DOCSTRING = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
    as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`ResNetConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

RESNET_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`ConvNextImageProcessor.__call__`] for details.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""


@add_start_docstrings(
    "The bare ResNet model outputting raw features without any specific head on top.",
    RESNET_START_DOCSTRING,
)
class ResNetModel(ResNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config
        self.embedder = ResNetEmbeddings(config)
        self.encoder = ResNetEncoder(config)
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self, pixel_values: Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None
    ) -> BaseModelOutputWithPoolingAndNoAttention:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        embedding_output = self.embedder(pixel_values)

        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict
        )

        last_hidden_state = encoder_outputs[0]

        pooled_output = self.pooler(last_hidden_state)

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
        )


@add_start_docstrings(
    """
    ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    RESNET_START_DOCSTRING,
)
class ResNetForImageClassification(ResNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.resnet = ResNetModel(config)
        # classification head
        self.classifier = nn.Sequential(
            nn.Flatten(),
            nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity(),
        )
        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> ImageClassifierOutputWithNoAttention:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.resnet(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        logits = self.classifier(pooled_output)

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"
            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return (loss,) + output if loss is not None else output

        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)


@add_start_docstrings(
    """
    ResNet backbone, to be used with frameworks like DETR and MaskFormer.
    """,
    RESNET_START_DOCSTRING,
)
class ResNetBackbone(ResNetPreTrainedModel, BackboneMixin):
    def __init__(self, config):
        super().__init__(config)
        super()._init_backbone(config)

        self.num_features = [config.embedding_size] + config.hidden_sizes
        self.embedder = ResNetEmbeddings(config)
        self.encoder = ResNetEncoder(config)

        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=BackboneOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self, pixel_values: Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None
    ) -> BackboneOutput:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )

        embedding_output = self.embedder(pixel_values)

        outputs = self.encoder(embedding_output, output_hidden_states=True, return_dict=True)

        hidden_states = outputs.hidden_states

        feature_maps = ()
        for idx, stage in enumerate(self.stage_names):
            if stage in self.out_features:
                feature_maps += (hidden_states[idx],)

        if not return_dict:
            output = (feature_maps,)
            if output_hidden_states:
                output += (outputs.hidden_states,)
            return output

        return BackboneOutput(
            feature_maps=feature_maps,
            hidden_states=outputs.hidden_states if output_hidden_states else None,
            attentions=None,
        )
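
# Minimal usage sketch (an editorial addition; the model here is randomly
# initialized from a default config rather than loaded from a checkpoint):
#
#     config = ResNetConfig()
#     model = ResNetModel(config)
#     pixel_values = torch.randn(1, config.num_channels, 224, 224)
#     outputs = model(pixel_values)
#     outputs.pooler_output.shape  # torch.Size([1, 2048, 1, 1])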
from __future__ import annotations

import collections
import pprint
from pathlib import Path


def signature(word: str) -> str:
    """Return the sorted word: the canonical signature shared by all anagrams."""
    return "".join(sorted(word))


def anagram(my_word: str) -> list[str]:
    """Return every word from the word list that is an anagram of the given word."""
    return word_by_signature[signature(my_word)]


data = Path(__file__).parent.joinpath("words.txt").read_text(encoding="utf-8")
word_list = sorted({word.strip().lower() for word in data.splitlines()})

word_by_signature = collections.defaultdict(list)
for word in word_list:
    word_by_signature[signature(word)].append(word)

if __name__ == "__main__":
    all_anagrams = {word: anagram(word) for word in word_list if len(anagram(word)) > 1}

    with open("anagrams.txt", "w") as file:
        file.write("all_anagrams = \n ")
        file.write(pprint.pformat(all_anagrams))
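
# Example (an editorial addition; the actual output depends on the contents
# of "words.txt"):
#
#     >>> signature("listen")
#     'eilnst'
#     >>> anagram("silent")
#     ['enlist', 'listen', 'silent', 'tinsel']  # for a word list containing these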
import argparse
import json
import os

import torch
from torch import nn

from transformers import NllbMoeConfig, NllbMoeModel
from transformers.modeling_utils import dtype_byte_size
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME


def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "encoder.embed_positions._float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def rename_fairseq_keys(state_dict, expert_idx=None):
    new_dict = {}
    for old_key in state_dict.keys():
        key = old_key
        if "moe_layer.experts." in key:
            if expert_idx is not None:
                key = key.replace("moe_layer.experts.0", f"ffn.experts.expert_{expert_idx}")
            else:
                key = key.replace("moe_layer.experts.", "ffn.experts.expert_")
        if "gate" in key:
            key = key.replace(".moe_layer.gate.wg", ".ffn.router.classifier")
        if "fc2" in key and "experts" not in key:
            key = key.replace(".fc2.", ".ffn.fc2.")
        if "fc1" in key and "experts" not in key:
            key = key.replace(".fc1.", ".ffn.fc1.")
        if ".encoder_attn." in key:
            key = key.replace(".encoder_attn.", ".cross_attention.")
        if "encoder_attn_layer_norm" in key:
            key = key.replace("encoder_attn_layer_norm", "cross_attention_layer_norm")
        if "final_layer_norm" in key:
            key = key.replace("final_layer_norm", "ff_layer_norm")
        new_dict[key] = state_dict[old_key]
    return new_dict


def shard_on_the_fly(switch_checkpoint_path, dump_path, num_experts, dtype, weights_name: str = WEIGHTS_NAME):
    sharded_state_dicts = []
    total_size = 0
    os.makedirs(dump_path, exist_ok=True)

    for expert in range(num_experts):
        expert_path = switch_checkpoint_path + f"-rank-{expert}.pt"
        if os.path.isfile(expert_path):
            expert_state = torch.load(expert_path)["model"]
            remove_ignore_keys_(expert_state)
            expert_state = rename_fairseq_keys(expert_state, expert)
            save_path = os.path.join(
                dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin")
            )
            torch.save(expert_state, save_path)
            sharded_state_dicts.append(expert_state.keys())
            total_size += sum([value.numel() for key, value in expert_state.items()]) * dtype_byte_size(
                expert_state[list(expert_state)[0]].dtype
            )

    # Add the last block
    save_path = os.path.join(dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin"))
    shared_weights = torch.load(switch_checkpoint_path + "-shared.pt")["model"]
    remove_ignore_keys_(shared_weights)
    shared_weights = rename_fairseq_keys(shared_weights, None)
    shared_weights["shared.weight"] = shared_weights["decoder.embed_tokens.weight"]
    sharded_state_dicts.append(shared_weights.keys())

    # If we only have the shared weights (dummy model/experts saved on the same file)
    if len(sharded_state_dicts) == 1:
        save_path = os.path.join(dump_path, weights_name)
        torch.save(shared_weights, save_path)
        return {weights_name: sharded_state_dicts[0]}, None
    else:
        torch.save(shared_weights, save_path)
    # Otherwise, let's build the index
    weight_map = {}
    for idx, shard in enumerate(sharded_state_dicts):
        shard_file = weights_name.replace(".bin", f"-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.bin")
        temp_filename = os.path.join(dump_path, weights_name.replace(".bin", f"-{idx+1:05d}-of-???.bin"))
        os.rename(temp_filename, os.path.join(dump_path, shard_file))
        for key in shard:
            weight_map[key] = shard_file

    # Add the metadata
    metadata = {"total_size": total_size}
    index = {"metadata": metadata, "weight_map": weight_map}

    with open(os.path.join(dump_path, WEIGHTS_INDEX_NAME), "w", encoding="utf-8") as f:
        content = json.dumps(index, indent=2, sort_keys=True) + "\n"
        f.write(content)

    return metadata, index


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--nllb_moe_checkpoint_path",
        default="/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000",
        type=str,
        required=False,
        help="Path to a directory containing a folder per layer. Follows the original Google format.",
    )
    parser.add_argument("--dtype", default="float32", type=str, required=False, help="dtype of the saved model")
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default="/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b",
        type=str,
        required=False,
        help="Path to the output pytorch model.",
    )
    args = parser.parse_args()
    metadata, index = shard_on_the_fly(
        args.nllb_moe_checkpoint_path,
        args.pytorch_dump_folder_path,
        128,
        args.dtype,
    )

    config = NllbMoeConfig.from_pretrained(
        "facebook/nllb-200-3.3B", encoder_sparse_step=4, decoder_sparse_step=4, num_experts=128
    )
    config.save_pretrained(args.pytorch_dump_folder_path)
    model = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path)
    print("Done")
    model.save_pretrained(args.pytorch_dump_folder_path)
from typing import Dict, List, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    center_crop,
    get_resize_output_image_size,
    normalize,
    rescale,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    IMAGENET_STANDARD_MEAN,
    IMAGENET_STANDARD_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_vision_available, logging


if is_vision_available():
    import PIL


logger = logging.get_logger(__name__)


class ConvNextImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        crop_pct: float = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 384}
        size = get_size_dict(size, default_to_square=False)

        self.do_resize = do_resize
        self.size = size
        # Default value set here for backwards compatibility where the value in config is None
        self.crop_pct = crop_pct if crop_pct is not None else 224 / 256
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        crop_pct: float,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"Size dictionary must contain 'shortest_edge' key. Got {size.keys()}")
        shortest_edge = size["shortest_edge"]

        if shortest_edge < 384:
            # maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
            resize_shortest_edge = int(shortest_edge / crop_pct)
            resize_size = get_resize_output_image_size(image, size=resize_shortest_edge, default_to_square=False)
            image = resize(image=image, size=resize_size, resample=resample, data_format=data_format, **kwargs)
            # then crop to (shortest_edge, shortest_edge)
            return center_crop(image=image, size=(shortest_edge, shortest_edge), data_format=data_format, **kwargs)
        else:
            # warping (no cropping) when evaluated at 384 or larger
            return resize(
                image, size=(shortest_edge, shortest_edge), resample=resample, data_format=data_format, **kwargs
            )

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        crop_pct: float = None,
        resample: PILImageResampling = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        crop_pct = crop_pct if crop_pct is not None else self.crop_pct
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_resize and size["shortest_edge"] < 384 and crop_pct is None:
            raise ValueError("crop_pct must be specified if size < 384.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, crop_pct=crop_pct, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
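
# Usage sketch (an editorial addition; `BaseImageProcessor.__call__` forwards
# to `preprocess`, and `image` stands for any PIL image you supply):
#
#     processor = ConvNextImageProcessor(size={"shortest_edge": 224})
#     inputs = processor(images=image, return_tensors="np")
#     inputs["pixel_values"].shape  # (1, 3, 224, 224)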
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_mgp_str": ["MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP", "MgpstrConfig"],
    "processing_mgp_str": ["MgpstrProcessor"],
    "tokenization_mgp_str": ["MgpstrTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mgp_str"] = [
        "MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MgpstrModel",
        "MgpstrPreTrainedModel",
        "MgpstrForSceneTextRecognition",
    ]

if TYPE_CHECKING:
    from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig
    from .processing_mgp_str import MgpstrProcessor
    from .tokenization_mgp_str import MgpstrTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mgp_str import (
            MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST,
            MgpstrForSceneTextRecognition,
            MgpstrModel,
            MgpstrPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPTaConfig
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
_lowerCamelCase ={
"""return_dict""": False,
"""output_hidden_states""": True,
"""output_attentions""": True,
"""torchscript""": True,
"""torch_dtype""": """float16""",
"""use_bfloat16""": True,
"""tf_legacy_loss""": True,
"""pruned_heads""": {"""a""": 1},
"""tie_word_embeddings""": False,
"""is_decoder""": True,
"""cross_attention_hidden_size""": 1_2_8,
"""add_cross_attention""": True,
"""tie_encoder_decoder""": True,
"""max_length""": 5_0,
"""min_length""": 3,
"""do_sample""": True,
"""early_stopping""": True,
"""num_beams""": 3,
"""num_beam_groups""": 3,
"""diversity_penalty""": 0.5,
"""temperature""": 2.0,
"""top_k""": 1_0,
"""top_p""": 0.7,
"""typical_p""": 0.2,
"""repetition_penalty""": 0.8,
"""length_penalty""": 0.8,
"""no_repeat_ngram_size""": 5,
"""encoder_no_repeat_ngram_size""": 5,
"""bad_words_ids""": [1, 2, 3],
"""num_return_sequences""": 3,
"""chunk_size_feed_forward""": 5,
"""output_scores""": True,
"""return_dict_in_generate""": True,
"""forced_bos_token_id""": 2,
"""forced_eos_token_id""": 3,
"""remove_invalid_values""": True,
"""architectures""": ["""BertModel"""],
"""finetuning_task""": """translation""",
"""id2label""": {0: """label"""},
"""label2id""": {"""label""": """0"""},
"""tokenizer_class""": """BertTokenizerFast""",
"""prefix""": """prefix""",
"""bos_token_id""": 6,
"""pad_token_id""": 7,
"""eos_token_id""": 8,
"""sep_token_id""": 9,
"""decoder_start_token_id""": 1_0,
"""exponential_decay_length_penalty""": (5, 1.01),
"""suppress_tokens""": [0, 1],
"""begin_suppress_tokens""": 2,
"""task_specific_params""": {"""translation""": """some_params"""},
"""problem_type""": """regression""",
}
@is_staging_test
class ConfigPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-config")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-config-org")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-config")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        config.push_to_hub("test-config", use_auth_token=self._token)

        new_config = BertConfig.from_pretrained(f"{USER}/test-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="test-config")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir, repo_id="test-config", push_to_hub=True, use_auth_token=self._token)

        new_config = BertConfig.from_pretrained(f"{USER}/test-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

    def test_push_to_hub_in_organization(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        config.push_to_hub("valid_org/test-config-org", use_auth_token=self._token)

        new_config = BertConfig.from_pretrained("valid_org/test-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-config-org")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id="valid_org/test-config-org", push_to_hub=True, use_auth_token=self._token
            )

        new_config = BertConfig.from_pretrained("valid_org/test-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

    def test_push_to_hub_dynamic_config(self):
        CustomConfig.register_for_auto_class()
        config = CustomConfig(attribute=42)

        config.push_to_hub("test-dynamic-config", use_auth_token=self._token)

        # This has added the proper auto_map field to the config
        self.assertDictEqual(config.auto_map, {"AutoConfig": "custom_configuration.CustomConfig"})

        new_config = AutoConfig.from_pretrained(f"{USER}/test-dynamic-config", trust_remote_code=True)
        # Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
        self.assertEqual(new_config.__class__.__name__, "CustomConfig")
        self.assertEqual(new_config.attribute, 42)
class ConfigTestUtils(unittest.TestCase):
    def test_config_from_string(self):
        c = GPT2Config()

        # attempt to modify each of int/float/bool/str config records and verify they were updated
        n_embd = c.n_embd + 1  # int
        resid_pdrop = c.resid_pdrop + 1.0  # float
        scale_attn_weights = not c.scale_attn_weights  # bool
        summary_type = c.summary_type + "foo"  # str
        c.update_from_string(
            f"n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}"
        )
        self.assertEqual(n_embd, c.n_embd, "mismatch for key: n_embd")
        self.assertEqual(resid_pdrop, c.resid_pdrop, "mismatch for key: resid_pdrop")
        self.assertEqual(scale_attn_weights, c.scale_attn_weights, "mismatch for key: scale_attn_weights")
        self.assertEqual(summary_type, c.summary_type, "mismatch for key: summary_type")

    def test_config_common_kwargs_is_complete(self):
        base_config = PretrainedConfig()
        missing_keys = [key for key in base_config.__dict__ if key not in config_common_kwargs]
        # If this part of the test fails, you have arguments to add in config_common_kwargs above.
        self.assertListEqual(
            missing_keys, ["is_encoder_decoder", "_name_or_path", "_commit_hash", "transformers_version"]
        )
        keys_with_defaults = [key for key, value in config_common_kwargs.items() if value == getattr(base_config, key)]
        if len(keys_with_defaults) > 0:
            raise ValueError(
                "The following keys are set with the default values in"
                " `test_configuration_common.config_common_kwargs` pick another value for them:"
                f" {', '.join(keys_with_defaults)}."
            )

    def test_from_pretrained_subfolder(self):
        with self.assertRaises(OSError):
            # config is in subfolder, the following should not work without specifying the subfolder
            _ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder")

        config = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder", subfolder="bert")

        self.assertIsNotNone(config)

    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert")

        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert")
            # This check ensures we did call the fake head request
            mock_head.assert_called()

    def test_legacy_load_from_url(self):
        # This test is for deprecated behavior and can be removed in v5
        _ = BertConfig.from_pretrained(
            "https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json"
        )

    def test_local_versioning(self):
        configuration = AutoConfig.from_pretrained("bert-base-cased")
        configuration.configuration_files = ["config.4.0.0.json"]

        with tempfile.TemporaryDirectory() as tmp_dir:
            configuration.save_pretrained(tmp_dir)
            configuration.hidden_size = 2
            json.dump(configuration.to_dict(), open(os.path.join(tmp_dir, "config.4.0.0.json"), "w"))

            # This should pick the new configuration file as the version of Transformers is > 4.0.0
            new_configuration = AutoConfig.from_pretrained(tmp_dir)
            self.assertEqual(new_configuration.hidden_size, 2)

            # Will need to be adjusted if we reach v42 and this test is still here.
            # Should pick the old configuration file as the version of Transformers is < 4.42.0
            configuration.configuration_files = ["config.42.0.0.json"]
            configuration.hidden_size = 768
            configuration.save_pretrained(tmp_dir)
            shutil.move(os.path.join(tmp_dir, "config.4.0.0.json"), os.path.join(tmp_dir, "config.42.0.0.json"))
            new_configuration = AutoConfig.from_pretrained(tmp_dir)
            self.assertEqual(new_configuration.hidden_size, 768)

    def test_repo_versioning_before(self):
        # This repo has two configuration files, one for v4.0.0 and above with a different hidden size.
        repo = "hf-internal-testing/test-two-configs"

        import transformers as new_transformers

        new_transformers.configuration_utils.__version__ = "v4.0.0"
        new_configuration, kwargs = new_transformers.models.auto.AutoConfig.from_pretrained(
            repo, return_unused_kwargs=True
        )
        self.assertEqual(new_configuration.hidden_size, 2)
        # This checks that `_configuration_file` is not kept in the kwargs by mistake.
        self.assertDictEqual(kwargs, {})

        # Testing an older version by monkey-patching the version in the module it's used.
        import transformers as old_transformers

        old_transformers.configuration_utils.__version__ = "v3.0.0"
        old_configuration = old_transformers.models.auto.AutoConfig.from_pretrained(repo)
        self.assertEqual(old_configuration.hidden_size, 768)
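# --- Added usage sketch (mine, not part of the original test file): the string
# format `update_from_string` expects, as exercised by the first test above.
# Each value is cast to the type of the existing attribute.
if __name__ == "__main__":
    from transformers import GPT2Config

    demo = GPT2Config()
    demo.update_from_string("n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index")
    assert demo.n_embd == 10 and demo.resid_pdrop == 0.2 and not demo.scale_attn_weights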
| 287 | 0 |
import math
from collections.abc import Callable


def intersection(function: Callable[[float], float], x0: float, x1: float) -> float:
    """Find a root of `function` with the secant method, starting from x0 and x1."""
    x_n: float = x0
    x_n1: float = x1
    while True:
        if x_n == x_n1 or function(x_n1) == function(x_n):
            raise ZeroDivisionError("float division by zero, could not find root")
        x_n2: float = x_n1 - (
            function(x_n1) / ((function(x_n1) - function(x_n)) / (x_n1 - x_n))
        )
        if abs(x_n2 - x_n1) < 10**-5:
            return x_n2
        x_n = x_n1
        x_n1 = x_n2
def f(x: float) -> float:
    return math.pow(x, 3) - (2 * x) - 5
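# --- Added usage sketch (mine, not in the original file): the same secant routine
# works for any smooth function, e.g. the positive root of x**2 - 2 (i.e. sqrt(2)).
def demo_sqrt_two() -> float:
    return intersection(lambda x: x * x - 2, 1.0, 2.0)  # ~1.4142135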
if __name__ == "__main__":
print(intersection(f, 3, 3.5)) | 8 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class TFCamembertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_output_embeds_base_model(self):
        model = TFCamembertModel.from_pretrained("jplu/tf-camembert-base")

        input_ids = tf.convert_to_tensor(
            [[5, 121, 11, 660, 16, 730, 25543, 110, 83, 6]],
            dtype=tf.int32,
        )  # J'aime le camembert !

        output = model(input_ids)["last_hidden_state"]
        expected_shape = tf.TensorShape((1, 10, 768))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]],
            dtype=tf.float32,
        )
        # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
        # camembert.eval()
        # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()

        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
| 287 | 0 |
from __future__ import annotations
import os
from collections.abc import Mapping
EdgeT = tuple[int, int]


class Graph:
    """A weighted, undirected graph."""

    def __init__(self, vertices: set[int], edges: Mapping[EdgeT, int]) -> None:
        self.vertices: set[int] = vertices
        self.edges: dict[EdgeT, int] = {
            (min(edge), max(edge)): weight for edge, weight in edges.items()
        }

    def add_edge(self, edge: EdgeT, weight: int) -> None:
        """Add a new edge to the graph."""
        self.vertices.add(edge[0])
        self.vertices.add(edge[1])
        self.edges[(min(edge), max(edge))] = weight

    def prims_algorithm(self) -> Graph:
        """Run Prim's algorithm to find the minimum spanning tree."""
        subgraph = Graph({min(self.vertices)}, {})

        min_edge: EdgeT
        min_weight: int
        edge: EdgeT
        weight: int

        while len(subgraph.vertices) < len(self.vertices):
            min_weight = max(self.edges.values()) + 1
            for edge, weight in self.edges.items():
                if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
                    if weight < min_weight:
                        min_edge = edge
                        min_weight = weight
            subgraph.add_edge(min_edge, min_weight)
        return subgraph


def solution(filename: str = "p107_network.txt") -> int:
    """Return the saving achieved by replacing the network from the given file
    with its minimum spanning tree."""
    script_dir: str = os.path.abspath(os.path.dirname(__file__))
    filepath: str = os.path.join(script_dir, filename)

    edges: dict[EdgeT, int] = {}

    with open(filepath) as f:
        data = f.read().strip().split("\n")
    adjacency_matrix = [line.split(",") for line in data]
    for edge1 in range(1, len(adjacency_matrix)):
        for edge2 in range(edge1):
            if adjacency_matrix[edge1][edge2] != "-":
                edges[(edge2, edge1)] = int(adjacency_matrix[edge1][edge2])

    graph: Graph = Graph(set(range(len(adjacency_matrix))), edges)
    subgraph: Graph = graph.prims_algorithm()

    initial_total: int = sum(graph.edges.values())
    optimal_total: int = sum(subgraph.edges.values())
    return initial_total - optimal_total
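# --- Added usage sketch (mine, not part of the original solution): a 4-vertex graph
# whose minimum spanning tree drops the two heaviest edges, saving 16 + 11 = 27.
def _demo_prims() -> int:
    g = Graph({0, 1, 2, 3}, {(0, 1): 5, (1, 2): 7, (2, 3): 9, (0, 3): 16, (1, 3): 11})
    mst = g.prims_algorithm()
    return sum(g.edges.values()) - sum(mst.edges.values())  # 27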
if __name__ == "__main__":
print(f"""{solution() = }""")
| 9 |
import bz2
import gzip
import lzma
import os
import shutil
import struct
import tarfile
import warnings
import zipfile
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict, List, Optional, Type, Union
from .. import config
from .filelock import FileLock
from .logging import get_logger
logger = get_logger(__name__)
class ExtractManager:
    def __init__(self, cache_dir: Optional[str] = None):
        self.extract_dir = (
            os.path.join(cache_dir, config.EXTRACTED_DATASETS_DIR) if cache_dir else config.EXTRACTED_DATASETS_PATH
        )
        self.extractor = Extractor

    def _get_output_path(self, path: str) -> str:
        from .file_utils import hash_url_to_filename

        # Path where we extract compressed archives
        # We extract in the cache dir, and get the extracted path name by hashing the original path
        abs_path = os.path.abspath(path)
        return os.path.join(self.extract_dir, hash_url_to_filename(abs_path))

    def _do_extract(self, output_path: str, force_extract: bool) -> bool:
        return force_extract or (
            not os.path.isfile(output_path) and not (os.path.isdir(output_path) and os.listdir(output_path))
        )

    def extract(self, input_path: str, force_extract: bool = False) -> str:
        extractor_format = self.extractor.infer_extractor_format(input_path)
        if not extractor_format:
            return input_path
        output_path = self._get_output_path(input_path)
        if self._do_extract(output_path, force_extract):
            self.extractor.extract(input_path, output_path, extractor_format)
        return output_path
class BaseExtractor(ABC):
    @classmethod
    @abstractmethod
    def is_extractable(cls, path: Union[Path, str], **kwargs) -> bool:
        ...

    @staticmethod
    @abstractmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        ...
class MagicNumberBaseExtractor(BaseExtractor, ABC):
    magic_numbers: List[bytes] = []

    @staticmethod
    def read_magic_number(path: Union[Path, str], magic_number_length: int):
        with open(path, "rb") as f:
            return f.read(magic_number_length)

    @classmethod
    def is_extractable(cls, path: Union[Path, str], magic_number: bytes = b"") -> bool:
        if not magic_number:
            magic_number_length = max(len(cls_magic_number) for cls_magic_number in cls.magic_numbers)
            try:
                magic_number = cls.read_magic_number(path, magic_number_length)
            except OSError:
                return False
        return any(magic_number.startswith(cls_magic_number) for cls_magic_number in cls.magic_numbers)
class TarExtractor(BaseExtractor):
    @classmethod
    def is_extractable(cls, path: Union[Path, str], **kwargs) -> bool:
        return tarfile.is_tarfile(path)

    @staticmethod
    def safemembers(members, output_path):
        # Fix for CVE-2007-4559 (tar path traversal): filter out unsafe archive members.
        def resolved(path: str) -> str:
            return os.path.realpath(os.path.abspath(path))

        def badpath(path: str, base: str) -> bool:
            # joinpath will ignore base if path is absolute
            return not resolved(os.path.join(base, path)).startswith(base)

        def badlink(info, base: str) -> bool:
            # Links are interpreted relative to the directory containing the link
            tip = resolved(os.path.join(base, os.path.dirname(info.name)))
            return badpath(info.linkname, base=tip)

        base = resolved(output_path)

        for finfo in members:
            if badpath(finfo.name, base):
                logger.error(f"Extraction of {finfo.name} is blocked (illegal path)")
            elif finfo.issym() and badlink(finfo, base):
                logger.error(f"Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}")
            elif finfo.islnk() and badlink(finfo, base):
                logger.error(f"Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}")
            else:
                yield finfo

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        os.makedirs(output_path, exist_ok=True)
        tar_file = tarfile.open(input_path)
        tar_file.extractall(output_path, members=TarExtractor.safemembers(tar_file, output_path))
        tar_file.close()
class GzipExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x1F\x8B"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        with gzip.open(input_path, "rb") as gzip_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(gzip_file, extracted_file)
class ZipExtractor(MagicNumberBaseExtractor):
    magic_numbers = [
        b"PK\x03\x04",
        b"PK\x05\x06",  # empty archive
        b"PK\x07\x08",  # spanned archive
    ]

    @classmethod
    def is_extractable(cls, path: Union[Path, str], magic_number: bytes = b"") -> bool:
        if super().is_extractable(path, magic_number=magic_number):
            return True
        try:
            # Alternative version of zipfile.is_zipfile that has less false positives, but misses executable zip archives.
            # From: https://github.com/python/cpython/pull/5053
            from zipfile import (
                _CD_SIGNATURE,
                _ECD_DISK_NUMBER,
                _ECD_DISK_START,
                _ECD_ENTRIES_TOTAL,
                _ECD_OFFSET,
                _ECD_SIZE,
                _EndRecData,
                sizeCentralDir,
                stringCentralDir,
                structCentralDir,
            )

            with open(path, "rb") as fp:
                endrec = _EndRecData(fp)
                if endrec:
                    if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0:
                        return True  # Empty zipfiles are still zipfiles
                    elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]:
                        fp.seek(endrec[_ECD_OFFSET])  # Central directory is on the same disk
                        if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir:
                            data = fp.read(sizeCentralDir)  # CD is where we expect it to be
                            if len(data) == sizeCentralDir:
                                centdir = struct.unpack(structCentralDir, data)  # CD is the right size
                                if centdir[_CD_SIGNATURE] == stringCentralDir:
                                    return True  # First central directory entry has correct magic number
            return False
        except Exception:  # catch all errors in case future python versions change the zipfile internals
            return False

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        os.makedirs(output_path, exist_ok=True)
        with zipfile.ZipFile(input_path, "r") as zip_file:
            zip_file.extractall(output_path)
            zip_file.close()
class XzExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\xFD\x37\x7A\x58\x5A\x00"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        with lzma.open(input_path) as compressed_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file)
class RarExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"Rar!\x1a\x07\x00", b"Rar!\x1a\x07\x01\x00"]  # RAR_ID  # RAR5_ID

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        if not config.RARFILE_AVAILABLE:
            raise ImportError("Please pip install rarfile")
        import rarfile

        os.makedirs(output_path, exist_ok=True)
        rf = rarfile.RarFile(input_path)
        rf.extractall(output_path)
        rf.close()
class ZstdExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x28\xb5\x2F\xFD"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        if not config.ZSTANDARD_AVAILABLE:
            raise ImportError("Please pip install zstandard")
        import zstandard as zstd

        dctx = zstd.ZstdDecompressor()
        with open(input_path, "rb") as ifh, open(output_path, "wb") as ofh:
            dctx.copy_stream(ifh, ofh)
class Bzip2Extractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x42\x5A\x68"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        with bz2.open(input_path, "rb") as compressed_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file)
class SevenZipExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x37\x7A\xBC\xAF\x27\x1C"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        if not config.PY7ZR_AVAILABLE:
            raise ImportError("Please pip install py7zr")
        import py7zr

        os.makedirs(output_path, exist_ok=True)
        with py7zr.SevenZipFile(input_path, "r") as archive:
            archive.extractall(output_path)
class Lz4Extractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x04\x22\x4D\x18"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        if not config.LZ4_AVAILABLE:
            raise ImportError("Please pip install lz4")
        import lz4.frame

        with lz4.frame.open(input_path, "rb") as compressed_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file)
class Extractor:
    # Put the zip extractor last, because other formats can be wrongly detected as zip
    extractors: Dict[str, Type[BaseExtractor]] = {
        "tar": TarExtractor,
        "gzip": GzipExtractor,
        "zip": ZipExtractor,
        "xz": XzExtractor,
        "rar": RarExtractor,
        "zstd": ZstdExtractor,
        "bz2": Bzip2Extractor,
        "7z": SevenZipExtractor,  # <Added version="2.4.0"/>
        "lz4": Lz4Extractor,  # <Added version="2.4.0"/>
    }

    @classmethod
    def _get_magic_number_max_length(cls):
        return max(
            len(extractor_magic_number)
            for extractor in cls.extractors.values()
            if issubclass(extractor, MagicNumberBaseExtractor)
            for extractor_magic_number in extractor.magic_numbers
        )
    @staticmethod
    def _read_magic_number(path: Union[Path, str], magic_number_length: int):
        try:
            return MagicNumberBaseExtractor.read_magic_number(path, magic_number_length=magic_number_length)
        except OSError:
            return b""

    @classmethod
    def is_extractable(cls, path: Union[Path, str], return_extractor: bool = False) -> bool:
        warnings.warn(
            "Method 'is_extractable' was deprecated in version 2.4.0 and will be removed in 3.0.0. "
            "Use 'infer_extractor_format' instead.",
            category=FutureWarning,
        )
        extractor_format = cls.infer_extractor_format(path)
        if extractor_format:
            return True if not return_extractor else (True, cls.extractors[extractor_format])
        return False if not return_extractor else (False, None)

    @classmethod
    def infer_extractor_format(cls, path: Union[Path, str]) -> str:  # <Added version="2.4.0"/>
        magic_number_max_length = cls._get_magic_number_max_length()
        magic_number = cls._read_magic_number(path, magic_number_max_length)
        for extractor_format, extractor in cls.extractors.items():
            if extractor.is_extractable(path, magic_number=magic_number):
                return extractor_format

    @classmethod
    def extract(
        cls,
        input_path: Union[Path, str],
        output_path: Union[Path, str],
        extractor_format: Optional[str] = None,
        extractor: Optional[BaseExtractor] = "deprecated",
    ) -> None:
        os.makedirs(os.path.dirname(output_path), exist_ok=True)
        # Prevent parallel extractions
        lock_path = str(Path(output_path).with_suffix(".lock"))
        with FileLock(lock_path):
            shutil.rmtree(output_path, ignore_errors=True)
            if extractor_format or extractor != "deprecated":
                if extractor != "deprecated" or not isinstance(extractor_format, str):  # passed as positional arg
                    warnings.warn(
                        "Parameter 'extractor' was deprecated in version 2.4.0 and will be removed in 3.0.0. "
                        "Use 'extractor_format' instead.",
                        category=FutureWarning,
                    )
                    extractor = extractor if extractor != "deprecated" else extractor_format
                else:
                    extractor = cls.extractors[extractor_format]
                return extractor.extract(input_path, output_path)
            else:
                warnings.warn(
                    "Parameter 'extractor_format' was made required in version 2.4.0 and not passing it will raise an "
                    "exception in 3.0.0.",
                    category=FutureWarning,
                )
                for extractor in cls.extractors.values():
                    if extractor.is_extractable(input_path):
                        return extractor.extract(input_path, output_path)
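# --- Added usage sketch (mine, not part of the original module): sniffing and
# extracting a small gzip archive with the machinery above.
def _demo_extract(tmp_dir: str) -> str:
    archive = os.path.join(tmp_dir, "hello.txt.gz")
    with gzip.open(archive, "wb") as fh:
        fh.write(b"hello")
    assert Extractor.infer_extractor_format(archive) == "gzip"
    output = os.path.join(tmp_dir, "hello.txt")
    Extractor.extract(archive, output, extractor_format="gzip")
    return output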
| 287 | 0 |
from ..utils import DummyObject, requires_backends
class OnnxStableDiffusionImg2ImgPipeline(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])


class OnnxStableDiffusionInpaintPipeline(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])


class OnnxStableDiffusionInpaintPipelineLegacy(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])


class OnnxStableDiffusionPipeline(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])


class OnnxStableDiffusionUpscalePipeline(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])


class StableDiffusionOnnxPipeline(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])
| 10 |
import json
import os
import tempfile
import transformers
import datasets
from utils import generate_example_dataset, get_duration
SPEED_TEST_N_EXAMPLES = 500_000

RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))


@get_duration
def map(dataset: datasets.Dataset, **kwargs):
    _ = dataset.map(**kwargs)


@get_duration
def filter(dataset: datasets.Dataset, **kwargs):
    _ = dataset.filter(**kwargs)


def benchmark_map_filter():
    times = {"num examples": SPEED_TEST_N_EXAMPLES}
    with tempfile.TemporaryDirectory() as tmp_dir:
        features = datasets.Features({"text": datasets.Value("string"), "numbers": datasets.Value("float32")})
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, "dataset.arrow"), features, num_examples=SPEED_TEST_N_EXAMPLES
        )

        tokenizer = transformers.AutoTokenizer.from_pretrained("bert-base-cased", use_fast=True)

        def tokenize(examples):
            return tokenizer(examples["text"])

        times["map identity"] = map(dataset)

        times["map identity batched"] = map(dataset, batched=True)

        times["map no-op batched"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="numpy"):
            times["map no-op batched numpy"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="pandas"):
            times["map no-op batched pandas"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="torch", columns="numbers"):
            times["map no-op batched pytorch"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="tensorflow", columns="numbers"):
            times["map no-op batched tensorflow"] = map(dataset, function=lambda x: None, batched=True)

        times["map fast-tokenizer batched"] = map(dataset, function=tokenize, batched=True)

        times["filter"] = filter(dataset)

        # Activate later when tokenizer support batched inputs
        # with dataset.formatted_as(type='numpy'):
        #     times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)

    with open(RESULTS_FILE_PATH, "wb") as f:
        f.write(json.dumps(times).encode("utf-8"))
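# --- Added sketch (mine): `get_duration` is imported from the benchmark's local
# `utils` module; a minimal version of such a timing decorator might look like this
# (kept commented out so it does not shadow the imported one).
#
# import time
# from functools import wraps
#
# def get_duration(func):
#     @wraps(func)
#     def wrapper(*args, **kwargs):
#         start = time.time()
#         func(*args, **kwargs)
#         return time.time() - start
#     return wrapper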
if __name__ == "__main__": # useful to run the profiler
benchmark_map_filter()
| 287 | 0 |
def is_palindrome(num: int) -> bool:
    """
    >>> is_palindrome(121)
    True
    >>> is_palindrome(-121)
    False
    """
    if num < 0:
        return False
    num_copy = num
    rev_num = 0
    while num > 0:
        rev_num = rev_num * 10 + (num % 10)
        num //= 10
    return num_copy == rev_num
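# --- Added usage example (mine, not in the original file):
def first_palindromes_above(limit: int, count: int) -> list[int]:
    """Collect the first `count` palindromic integers greater than `limit`,
    e.g. first_palindromes_above(100, 3) == [101, 111, 121]."""
    found: list[int] = []
    candidate = limit + 1
    while len(found) < count:
        if is_palindrome(candidate):
            found.append(candidate)
        candidate += 1
    return found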
if __name__ == "__main__":
import doctest
doctest.testmod()
| 11 |
def lucas_lehmer_test(p: int) -> bool:
    """Lucas-Lehmer primality test for the Mersenne number 2**p - 1 (p should be prime)."""
    if p < 2:
        raise ValueError("p should not be less than 2!")
    elif p == 2:
        return True

    s = 4
    m = (1 << p) - 1
    for _ in range(p - 2):
        s = ((s * s) - 2) % m
    return s == 0
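# --- Added usage sketch (mine, not in the original file): filtering prime exponents
# with the test recovers the early Mersenne-prime exponents. Note that 11 is
# correctly rejected, since 2**11 - 1 = 2047 = 23 * 89.
def mersenne_prime_exponents() -> list[int]:
    return [p for p in (3, 5, 7, 11, 13, 17, 19, 31) if lucas_lehmer_test(p)]
    # -> [3, 5, 7, 13, 17, 19, 31]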
if __name__ == "__main__":
print(lucas_lehmer_test(7))
print(lucas_lehmer_test(1_1))
| 287 | 0 |
def solution(max_base: int = 10, max_power: int = 22) -> int:
    """Count the positive n-digit integers that are also an nth power."""
    bases = range(1, max_base)
    powers = range(1, max_power)
    return sum(
        1 for power in powers for base in bases if len(str(base**power)) == power
    )
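# --- Added worked check (mine, not part of the original solution): the default
# bounds are safe because even base 9 runs out of digits quickly -- 9**21 has
# exactly 21 digits, while 9**22 still has only 21 digits, so no power >= 22 can match.
def _demo_bounds() -> tuple[int, int]:
    return len(str(9**21)), len(str(9**22))  # (21, 21)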
if __name__ == "__main__":
print(f"""{solution(10, 22) = }""")
| 12 |
import warnings
from ...utils import logging
from .image_processing_owlvit import OwlViTImageProcessor
logger = logging.get_logger(__name__)


class OwlViTFeatureExtractor(OwlViTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use OwlViTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 287 | 0 |
from __future__ import annotations
# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
precision = 10


def lin_search(left: int, right: int, array: list[int], target: int) -> int:
    """Plain linear search over array[left:right]."""
    for i in range(left, right):
        if array[i] == target:
            return i
    return -1


def ite_ternary_search(array: list[int], target: int) -> int:
    """Iterative ternary search; returns the index of target or -1."""
    left = 0
    right = len(array)
    while left <= right:
        if right - left < precision:
            return lin_search(left, right, array, target)

        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third

        elif target < array[one_third]:
            right = one_third - 1
        elif array[two_third] < target:
            left = two_third + 1

        else:
            left = one_third + 1
            right = two_third - 1
    else:
        return -1


def rec_ternary_search(left: int, right: int, array: list[int], target: int) -> int:
    """Recursive ternary search; returns the index of target or -1."""
    if left < right:
        if right - left < precision:
            return lin_search(left, right, array, target)

        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third

        elif target < array[one_third]:
            return rec_ternary_search(left, one_third - 1, array, target)
        elif array[two_third] < target:
            return rec_ternary_search(two_third + 1, right, array, target)
        else:
            return rec_ternary_search(one_third + 1, two_third - 1, array, target)
    else:
        return -1
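# --- Added cross-check (mine, not in the original file): on a short sorted list both
# variants fall through to `lin_search` and should agree with the standard library's
# bisect for the elements they can reach.
def _demo_cross_check() -> None:
    import bisect

    data = [1, 3, 5, 7, 9]
    for target in (1, 3, 5, 7):
        expected = bisect.bisect_left(data, target)
        assert ite_ternary_search(data, target) == expected
        assert rec_ternary_search(0, len(data) - 1, data, target) == expected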
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input("Enter numbers separated by comma:\n").strip()
    collection = [int(item.strip()) for item in user_input.split(",")]
    assert collection == sorted(collection), f"List must be ordered.\n{collection}."
    target = int(input("Enter the number to be found in the list:\n").strip())
    result1 = ite_ternary_search(collection, target)
    result2 = rec_ternary_search(0, len(collection) - 1, collection, target)
    if result1 != -1:
        print(f"Iterative search: {target} found at position: {result1}")
        print(f"Recursive search: {target} found at position: {result2}")
    else:
        print("Not found")
| 13 |
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SEGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "nvidia/segformer-b0-finetuned-ade-512-512": (
        "https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json"
    ),
    # See all SegFormer models at https://huggingface.co/models?filter=segformer
}
class SegformerConfig(PretrainedConfig):
    model_type = "segformer"

    def __init__(
        self,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[32, 64, 160, 256],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        num_attention_heads=[1, 2, 5, 8],
        mlp_ratios=[4, 4, 4, 4],
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        drop_path_rate=0.1,
        layer_norm_eps=1e-6,
        decoder_hidden_size=256,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
            warnings.warn(
                "Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be"
                " removed, as the behaviour will default to that of reshape_last_stage = True.",
                FutureWarning,
            )

        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.reshape_last_stage = kwargs.get("reshape_last_stage", True)
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class SegformerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        return 12
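# --- Added usage sketch (mine, not part of the original module): the defaults above
# correspond to the smallest MiT-b0 encoder; larger variants mainly change the
# per-stage depths and widths (values below are illustrative, not an official checkpoint).
def _demo_config() -> SegformerConfig:
    return SegformerConfig(depths=[3, 4, 6, 3], hidden_sizes=[64, 128, 320, 512], decoder_hidden_size=768)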
| 287 | 0 |
import warnings
from ...utils import logging
from .image_processing_poolformer import PoolFormerImageProcessor
logger = logging.get_logger(__name__)


class PoolFormerFeatureExtractor(PoolFormerImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use PoolFormerImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 14 |
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging
logger = logging.get_logger(__name__)

GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "EleutherAI/gpt-neo-1.3B": "https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json",
    # See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}
class GPTNeoConfig(PretrainedConfig):
    model_type = "gpt_neo"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}

    def __init__(
        self,
        vocab_size=50257,
        max_position_embeddings=2048,
        hidden_size=2048,
        num_layers=24,
        attention_types=[[["global", "local"], 12]],
        num_heads=16,
        intermediate_size=None,
        window_size=256,
        activation_function="gelu_new",
        resid_dropout=0.0,
        embed_dropout=0.0,
        attention_dropout=0.0,
        classifier_dropout=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.intermediate_size = intermediate_size
        self.window_size = window_size
        self.activation_function = activation_function
        self.resid_dropout = resid_dropout
        self.embed_dropout = embed_dropout
        self.attention_dropout = attention_dropout
        self.classifier_dropout = classifier_dropout
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        self.attention_types = attention_types
        self.attention_layers = self.expand_attention_types_params(attention_types)

        if len(self.attention_layers) != self.num_layers:
            raise ValueError(
                "Configuration for convolutional module is incorrect. "
                "It is required that `len(config.attention_layers)` == `config.num_layers` "
                f"but is `len(config.attention_layers) = {len(self.attention_layers)}`, "
                f"`config.num_layers = {self.num_layers}`. "
                "`config.attention_layers` is prepared using `config.attention_types`. "
                "Please verify the value of `config.attention_types` argument."
            )

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @staticmethod
    def expand_attention_types_params(attention_types):
        attentions = []
        for item in attention_types:
            for _ in range(item[1]):
                attentions.extend(item[0])
        return attentions
def custom_unfold(input, dimension, size, step):
    """Custom torch.Tensor.unfold implementation to enable the export to ONNX."""
    import torch

    shape = input.size()
    rank = len(shape)
    sizedim = shape[dimension]

    low_indices = torch.arange(0, sizedim, step)
    min_length = torch.div(sizedim - size, step, rounding_mode="floor") + 1
    indices = torch.arange(size) + low_indices[:min_length][:, None]

    s = [slice(None)] * rank
    s[dimension] = indices
    sliced = input[s]

    perm = list(range(0, rank + 1))
    perm.append(perm.pop(dimension + 1))

    return sliced.permute(perm)


def custom_get_block_length_and_num_blocks(seq_length, window_size):
    """
    Custom implementation for GPTNeoAttentionMixin._get_block_length_and_num_blocks to enable the export to ONNX.
    """
    import torch

    candidates = torch.arange(1, window_size)
    remainders = torch.remainder(seq_length, candidates)
    divisor_indices = remainders == 0
    divisors = candidates[divisor_indices]
    largest_divisor = torch.max(divisors)
    return largest_divisor, torch.div(seq_length, largest_divisor, rounding_mode="floor")
class GPTNeoOnnxConfig(OnnxConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}

        return common_inputs

    @property
    def num_attention_heads(self) -> int:
        return self._config.num_heads

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
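# --- Added sanity check (mine, not in the original module): `custom_unfold` is meant
# to mirror `torch.Tensor.unfold` in an ONNX-exportable way, which is easy to verify.
def _demo_custom_unfold() -> None:
    import torch

    x = torch.arange(24, dtype=torch.float32).reshape(2, 12)
    assert torch.equal(custom_unfold(x, 1, 4, 2), x.unfold(1, 4, 2))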
| 287 | 0 |
from typing import List, Optional
import numpy as np
from ...processing_utils import ProcessorMixin
from ...utils import to_numpy
class MusicgenProcessor(ProcessorMixin):
    feature_extractor_class = "EncodecFeatureExtractor"
    tokenizer_class = ("T5Tokenizer", "T5TokenizerFast")

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def get_decoder_prompt_ids(self, task=None, language=None, no_timestamps=True):
        return self.tokenizer.get_decoder_prompt_ids(task=task, language=language, no_timestamps=no_timestamps)

    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if text is not None:
            inputs = self.tokenizer(text, **kwargs)

        if audio is not None:
            audio_inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)

        if audio is None:
            return inputs
        elif text is None:
            return audio_inputs
        else:
            inputs["input_values"] = audio_inputs["input_values"]
            if "padding_mask" in audio_inputs:
                inputs["padding_mask"] = audio_inputs["padding_mask"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        audio_values = kwargs.pop("audio", None)
        padding_mask = kwargs.pop("padding_mask", None)

        if len(args) > 0:
            audio_values = args[0]
            args = args[1:]

        if audio_values is not None:
            return self._decode_audio(audio_values, padding_mask=padding_mask)
        else:
            return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    def _decode_audio(self, audio_values, padding_mask: Optional = None) -> List[np.ndarray]:
        audio_values = to_numpy(audio_values)
        bsz, channels, seq_len = audio_values.shape

        if padding_mask is None:
            return list(audio_values)

        padding_mask = to_numpy(padding_mask)

        # match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding**
        # token (so that the generated audio values are **not** treated as padded tokens)
        difference = seq_len - padding_mask.shape[-1]
        padding_value = 1 - self.feature_extractor.padding_value
        padding_mask = np.pad(padding_mask, ((0, 0), (0, difference)), "constant", constant_values=padding_value)

        audio_values = audio_values.tolist()
        for i in range(bsz):
            sliced_audio = np.asarray(audio_values[i])[
                padding_mask[i][None, :] != self.feature_extractor.padding_value
            ]
            audio_values[i] = sliced_audio.reshape(channels, -1)

        return audio_values
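# --- Added illustration (mine, not part of the original module): the padding-mask
# handling in `_decode_audio` boils down to boolean slicing per sample, where a 0 in
# the mask marks a padded step that gets dropped.
def _demo_mask_slicing() -> list:
    values = np.arange(12, dtype=np.float32).reshape(2, 1, 6)  # (batch, channels, seq)
    mask = np.array([[1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1]])
    return [v[:, m == 1] for v, m in zip(values, mask)]  # per-sample lengths 4 and 6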
| 15 |
from . import __version__
# Backward compatibility imports, to make sure all those objects can be found in file_utils
from .utils import (
CLOUDFRONT_DISTRIB_PREFIX,
CONFIG_NAME,
DISABLE_TELEMETRY,
DUMMY_INPUTS,
DUMMY_MASK,
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
FEATURE_EXTRACTOR_NAME,
FLAX_WEIGHTS_NAME,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
MODEL_CARD_NAME,
MULTIPLE_CHOICE_DUMMY_INPUTS,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
SENTENCEPIECE_UNDERLINE,
SPIECE_UNDERLINE,
TF2_WEIGHTS_NAME,
TF_WEIGHTS_NAME,
TORCH_FX_REQUIRED_VERSION,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
USE_JAX,
USE_TF,
USE_TORCH,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
ContextManagers,
DummyObject,
EntryNotFoundError,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
TensorType,
_LazyModule,
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
cached_property,
copy_func,
default_cache_path,
define_sagemaker_information,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
get_torch_version,
has_file,
http_user_agent,
is_apex_available,
    is_bs4_available,
is_coloredlogs_available,
is_datasets_available,
    is_detectron2_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_librosa_available,
is_offline_mode,
is_onnx_available,
is_pandas_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
    is_py3nvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_tensor,
is_tensorflow_probability_available,
    is_tf2onnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
    is_torch_bf16_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
    is_torch_tf32_available,
is_torch_tpu_available,
is_torchaudio_available,
is_training_run_on_sagemaker,
is_vision_available,
replace_return_docstrings,
requires_backends,
to_numpy,
to_py_obj,
torch_only_method,
)
| 287 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'configuration_owlvit': [
'OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'OwlViTConfig',
'OwlViTOnnxConfig',
'OwlViTTextConfig',
'OwlViTVisionConfig',
],
'processing_owlvit': ['OwlViTProcessor'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_owlvit"] = ["OwlViTFeatureExtractor"]
    _import_structure["image_processing_owlvit"] = ["OwlViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_owlvit"] = [
'OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'OwlViTModel',
'OwlViTPreTrainedModel',
'OwlViTTextModel',
'OwlViTVisionModel',
'OwlViTForObjectDetection',
]
if TYPE_CHECKING:
from .configuration_owlvit import (
OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OwlViTConfig,
OwlViTOnnxConfig,
OwlViTTextConfig,
OwlViTVisionConfig,
)
from .processing_owlvit import OwlViTProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_owlvit import OwlViTFeatureExtractor
from .image_processing_owlvit import OwlViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_owlvit import (
OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
OwlViTForObjectDetection,
OwlViTModel,
OwlViTPreTrainedModel,
OwlViTTextModel,
OwlViTVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
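# --- Added sketch (mine, not part of this __init__): `_LazyModule` defers the heavy
# imports above until attribute access; a stripped-down version of the idea
# (kept commented out so it does not interfere with the real module):
#
# import importlib
# import types
#
# class LazyModule(types.ModuleType):
#     def __init__(self, name, import_structure):
#         super().__init__(name)
#         self._class_to_module = {c: m for m, classes in import_structure.items() for c in classes}
#
#     def __getattr__(self, attr):
#         module = importlib.import_module("." + self._class_to_module[attr], self.__name__)
#         return getattr(module, attr)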
| 16 |
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def check_model_parameters(model_a, model_b, did_step, iteration):
    for param, grad_param in zip(model_a.parameters(), model_b.parameters()):
        if not param.requires_grad:
            continue
        if not did_step:
            # Grads should not be in sync
            assert (
                torch.allclose(param.grad, grad_param.grad) is False
            ), f"Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})"
        else:
            # Grads should be in sync
            assert (
                torch.allclose(param.grad, grad_param.grad) is True
            ), f"Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})"
def step_model(model, input, target, accelerator, do_backward=True):
    model.train()
    output = model(input)
    loss = F.mse_loss(output, target.to(output.device))
    if not do_backward:
        loss /= accelerator.gradient_accumulation_steps
        loss.backward()
    else:
        accelerator.backward(loss)
def get_training_setup(accelerator, sched=False):
    "Returns everything needed to perform basic training"
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=80)
    dataloader = DataLoader(dset, batch_size=16)
    model.to(accelerator.device)
    if sched:
        opt = AdamW(params=model.parameters(), lr=1e-3)
        ddp_opt = AdamW(params=ddp_model.parameters(), lr=1e-3)
        sched = LambdaLR(opt, lr_lambda=lambda epoch: epoch**0.65)
        ddp_sched = LambdaLR(ddp_opt, lr_lambda=lambda epoch: epoch**0.65)
    # Make a copy of `model`
    if sched:
        ddp_model, ddp_opt, ddp_sched, dataloader = accelerator.prepare(ddp_model, ddp_opt, ddp_sched, dataloader)
    else:
        ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    if sched:
        return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
    return model, ddp_model, dataloader
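# --- Added note-as-code (mine, not part of the original test file): the behaviour the
# tests below exercise is that `no_sync` postpones DDP's gradient all-reduce:
#
# with accelerator.no_sync(ddp_model):
#     accelerator.backward(loss)   # gradients accumulate locally, no cross-process sync
# accelerator.backward(loss)       # outside the context, backward triggers the sync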
def test_noop_sync(accelerator):
    # Test when on a single CPU or GPU that the context manager does nothing
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)

        # Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
        check_model_parameters(model, ddp_model, True, iteration)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            assert torch.allclose(
                param.grad, ddp_param.grad
            ), f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
def test_distributed_sync(accelerator):
    # Test on distributed setup that context manager behaves properly
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)

        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if iteration % 2 == 0:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), f"Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"
            else:
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
def test_gradient_accumulation(split_batches=False, dispatch_batches=False):
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2
    )
    # Test that context manager behaves properly
    model, ddp_model, dataloader = get_training_setup(accelerator)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator, False)
        # Do "gradient accumulation" (noop)
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)

        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if ((iteration + 1) % 2 == 0) or (iteration == len(dataloader) - 1):
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), f"Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"
            else:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), f"Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
    GradientState._reset_state()
def test_gradient_accumulation_with_opt_and_scheduler(split_batches=False, dispatch_batches=False):
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2
    )
    # Test that context manager behaves properly
    model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched = get_training_setup(accelerator, True)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        model.train()
        ddp_model.train()
        step_model(model, input, target, accelerator, False)
        opt.step()
        if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(dataloader)):
            if split_batches:
                sched.step()
            else:
                for _ in range(accelerator.num_processes):
                    sched.step()
        opt.zero_grad()
        # Perform gradient accumulation under wrapper
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
            ddp_opt.step()
            ddp_sched.step()
            ddp_opt.zero_grad()
        # Learning rates should be the same
        assert (
            opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
        ), f'Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]["lr"]}\nDDP opt: {ddp_opt.param_groups[0]["lr"]}\n'
        did_step = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(dataloader))
        if accelerator.num_processes > 1:
            check_model_parameters(model, ddp_model, did_step, iteration)
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
    GradientState._reset_state()
def test_dataloader_break():
    accelerator = Accelerator()
    first_dset = RegressionDataset(length=80)
    first_dataloader = DataLoader(first_dset, batch_size=16)
    second_dset = RegressionDataset(length=96)
    second_dataloader = DataLoader(second_dset, batch_size=16)
    first_dataloader, second_dataloader = accelerator.prepare(first_dataloader, second_dataloader)
    assert accelerator.gradient_state.active_dataloader is None
    for iteration, _ in enumerate(first_dataloader):
        assert id(accelerator.gradient_state.active_dataloader) == id(first_dataloader)
        if iteration < len(first_dataloader) - 1:
            assert not accelerator.gradient_state.end_of_dataloader
            if iteration == 1:
                for batch_num, _ in enumerate(second_dataloader):
                    assert id(accelerator.gradient_state.active_dataloader) == id(second_dataloader)
                    if batch_num < len(second_dataloader) - 1:
                        assert not accelerator.gradient_state.end_of_dataloader
                    else:
                        assert accelerator.gradient_state.end_of_dataloader
        else:
            assert accelerator.gradient_state.end_of_dataloader
    assert accelerator.gradient_state.active_dataloader is None
def main():
    accelerator = Accelerator()
    state = accelerator.state
    if state.local_process_index == 0:
        print("**Test `accumulate` gradient accumulation with dataloader break**")
    test_dataloader_break()
    if state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print("**Test NOOP `no_sync` context manager**")
        test_noop_sync(accelerator)
    if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
        if state.local_process_index == 0:
            print("**Test Distributed `no_sync` context manager**")
        test_distributed_sync(accelerator)
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if state.local_process_index == 0:
                    print(
                        "**Test `accumulate` gradient accumulation, ",
                        f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**",
                    )
                test_gradient_accumulation(split_batch, dispatch_batches)
    # Currently will break on torch 2.0 +, need to investigate why
    if is_torch_version("<", "2.0") or state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print(
                "**Test `accumulate` gradient accumulation with optimizer and scheduler, ",
                "`split_batches=False`, `dispatch_batches=False`**",
            )
        test_gradient_accumulation_with_opt_and_scheduler()
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if not split_batch and not dispatch_batches:
                    continue
                if state.local_process_index == 0:
                    print(
                        "**Test `accumulate` gradient accumulation with optimizer and scheduler, ",
                        f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**",
                    )
                test_gradient_accumulation_with_opt_and_scheduler(split_batch, dispatch_batches)
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
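# ---------------------------------------------------------------------------
# Illustrative sketch (added; not part of the test suite above). The core
# pattern these tests exercise is `Accelerator.accumulate`, which skips
# gradient synchronization on all but every N-th step. `model`, `optimizer`
# and `dataloader` below are placeholders for objects prepared elsewhere.
#
# accelerator = Accelerator(gradient_accumulation_steps=2)
# model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)
# for batch in dataloader:
#     with accelerator.accumulate(model):
#         loss = model(**batch)
#         accelerator.backward(loss)
#         optimizer.step()
#         optimizer.zero_grad()
# ---------------------------------------------------------------------------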
| 287 | 0 |
"""simple docstring"""
def get_demo_graph(index: int) -> dict[int, list[int]]:
'''simple docstring'''
return [
{
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
},
{
0: [6],
1: [9],
2: [4, 5],
3: [4],
4: [2, 3],
5: [2],
6: [0, 7],
7: [6],
8: [],
9: [1],
},
{
0: [4],
1: [6],
2: [],
3: [5, 6, 7],
4: [0, 6],
5: [3, 8, 9],
6: [1, 3, 4, 7],
7: [3, 6, 8, 9],
8: [5, 7],
9: [5, 7],
},
{
0: [1, 3],
1: [0, 2, 4],
2: [1, 3, 4],
3: [0, 2, 4],
4: [1, 2, 3],
},
][index]
def compute_bridges(graph: dict[int, list[int]]) -> list[tuple[int, int]]:
    '''simple docstring'''
    id_ = 0
    n = len(graph)  # No of vertices in graph
    low = [0] * n
    visited = [False] * n

    def dfs(at, parent, bridges, id_):
        visited[at] = True
        low[at] = id_
        id_ += 1
        for to in graph[at]:
            if to == parent:
                pass
            elif not visited[to]:
                dfs(to, at, bridges, id_)
                low[at] = min(low[at], low[to])
                if id_ <= low[to]:
                    bridges.append((at, to) if at < to else (to, at))
            else:
                # This edge is a back edge and cannot be a bridge
                low[at] = min(low[at], low[to])

    bridges: list[tuple[int, int]] = []
    for i in range(n):
        if not visited[i]:
            dfs(i, -1, bridges, id_)
    return bridges
if __name__ == "__main__":
import doctest
doctest.testmod()
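    # Usage demonstration (added for illustration): on sample graph 0 the only
    # cut edges are the chain 2-3-4 and the link 2-5, so this prints the
    # bridges [(2, 3), (3, 4), (2, 5)] (ordering may vary with DFS traversal).
    print(compute_bridges(get_demo_graph(0)))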
| 17 |
from scipy.stats import pearsonr
import datasets
_lowerCamelCase ="""
Pearson correlation coefficient and p-value for testing non-correlation.
The Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.
The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.
"""
_lowerCamelCase ="""
Args:
predictions (`list` of `int`): Predicted class labels, as returned by a model.
references (`list` of `int`): Ground truth labels.
return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.
Returns:
pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.
p-value (`float`): P-value, which roughly indicates the probability of an The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.
Examples:
Example 1-A simple example using only predictions and references.
>>> pearsonr_metric = datasets.load_metric(\"pearsonr\")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])
>>> print(round(results['pearsonr'], 2))
-0.74
Example 2-The same as Example 1, but that also returns the `p-value`.
>>> pearsonr_metric = datasets.load_metric(\"pearsonr\")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)
>>> print(sorted(list(results.keys())))
['p-value', 'pearsonr']
>>> print(round(results['pearsonr'], 2))
-0.74
>>> print(round(results['p-value'], 2))
0.15
"""
_lowerCamelCase ="""
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, Ilhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Antonio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class Pearsonr(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""float""" ),
"""references""": datasets.Value("""float""" ),
} ) , reference_urls=["""https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html"""] , )
    def _compute(self, predictions, references, return_pvalue=False):
        if return_pvalue:
            results = pearsonr(references, predictions)
            return {"pearsonr": results[0], "p-value": results[1]}
        else:
            return {"pearsonr": float(pearsonr(references, predictions)[0])}
| 287 | 0 |
import unittest
from transformers import SqueezeBertConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
)
class SqueezeBertModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=False, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=64, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, q_groups=2, k_groups=2, v_groups=2, post_attention_groups=2, intermediate_groups=4, output_groups=1):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.q_groups = q_groups
        self.k_groups = k_groups
        self.v_groups = v_groups
        self.post_attention_groups = post_attention_groups
        self.intermediate_groups = intermediate_groups
        self.output_groups = output_groups

    def prepare_config_and_inputs(self):
        """simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        """simple docstring"""
        return SqueezeBertConfig(
            embedding_size=self.hidden_size, vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, attention_probs_dropout_prob=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, q_groups=self.q_groups, k_groups=self.k_groups, v_groups=self.v_groups, post_attention_groups=self.post_attention_groups, intermediate_groups=self.intermediate_groups, output_groups=self.output_groups)
    def create_and_check_squeezebert_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        """simple docstring"""
        model = SqueezeBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_squeezebert_for_masked_lm(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        """simple docstring"""
        model = SqueezeBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_squeezebert_for_question_answering(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        """simple docstring"""
        model = SqueezeBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, start_positions=sequence_labels, end_positions=sequence_labels)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_squeezebert_for_sequence_classification(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = SqueezeBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_squeezebert_for_token_classification(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = SqueezeBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_squeezebert_for_multiple_choice(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        """simple docstring"""
        config.num_choices = self.num_choices
        model = SqueezeBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, labels=choice_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class SqueezeBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            SqueezeBertModel,
            SqueezeBertForMaskedLM,
            SqueezeBertForMultipleChoice,
            SqueezeBertForQuestionAnswering,
            SqueezeBertForSequenceClassification,
            SqueezeBertForTokenClassification,
        )
        if is_torch_available()
        else None
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": SqueezeBertModel,
            "fill-mask": SqueezeBertForMaskedLM,
            "question-answering": SqueezeBertForQuestionAnswering,
            "text-classification": SqueezeBertForSequenceClassification,
            "token-classification": SqueezeBertForTokenClassification,
            "zero-shot": SqueezeBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = True
    test_head_masking = False

    def setUp(self):
        """simple docstring"""
        self.model_tester = SqueezeBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=SqueezeBertConfig, dim=37)
    def test_config(self):
        """simple docstring"""
        self.config_tester.run_common_tests()

    def test_squeezebert_model(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_token_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        """simple docstring"""
        for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SqueezeBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_sentencepiece
@require_tokenizers
@require_torch
class SqueezeBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_classification_squeezebert_mnli(self):
        """simple docstring"""
        model = SqueezeBertForSequenceClassification.from_pretrained("squeezebert/squeezebert-mnli")
        input_ids = torch.tensor([[1, 29414, 232, 328, 740, 1140, 12695, 69, 13, 1588, 2]])
        output = model(input_ids)[0]
        expected_shape = torch.Size((1, 3))
        self.assertEqual(output.shape, expected_shape)
        expected_tensor = torch.tensor([[0.6401, -0.0349, -0.6041]])
        self.assertTrue(torch.allclose(output, expected_tensor, atol=1e-4))
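# Illustrative usage of the checkpoint exercised above (added; kept commented
# so the test module stays import-safe and offline-friendly):
#
# model = SqueezeBertForSequenceClassification.from_pretrained("squeezebert/squeezebert-mnli")
# logits = model(torch.tensor([[1, 17, 2]]))[0]  # toy input ids; output has 3 MNLI logits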
| 18 |
import copy
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""microsoft/conditional-detr-resnet-50""": (
"""https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json"""
),
}
class ConditionalDetrConfig(PretrainedConfig):
    model_type = "conditional_detr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }
    def __init__(self, use_timm_backbone=True, backbone_config=None, num_channels=3, num_queries=300, encoder_layers=6, encoder_ffn_dim=2048, encoder_attention_heads=8, decoder_layers=6, decoder_ffn_dim=2048, decoder_attention_heads=8, encoder_layerdrop=0.0, decoder_layerdrop=0.0, is_encoder_decoder=True, activation_function="relu", d_model=256, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, init_xavier_std=1.0, auxiliary_loss=False, position_embedding_type="sine", backbone="resnet50", use_pretrained_backbone=True, dilation=False, class_cost=2, bbox_cost=5, giou_cost=2, mask_loss_coefficient=1, dice_loss_coefficient=1, cls_loss_coefficient=2, bbox_loss_coefficient=5, giou_loss_coefficient=2, focal_alpha=0.25, **kwargs):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")
        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.cls_loss_coefficient = cls_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
    @property
    def num_attention_heads(self):
        return self.encoder_attention_heads

    @property
    def hidden_size(self):
        return self.d_model

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
class ConditionalDetrOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self):
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self):
        return 1e-5

    @property
    def default_onnx_opset(self):
        return 12
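# Illustrative usage (added): the config above can be instantiated with its
# defaults (timm ResNet-50 backbone, 300 object queries, d_model of 256):
#
# config = ConditionalDetrConfig()
# assert config.num_queries == 300 and config.d_model == 256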
| 287 | 0 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mobilebert import MobileBertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"mobilebert-uncased": "https://huggingface.co/google/mobilebert-uncased/resolve/main/vocab.txt"},
    "tokenizer_file": {
        "mobilebert-uncased": "https://huggingface.co/google/mobilebert-uncased/resolve/main/tokenizer.json"
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"mobilebert-uncased": 512}
PRETRAINED_INIT_CONFIGURATION = {}
class MobileBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = MobileBertTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs) -> None:
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs)
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_a, token_ids_b=None):
        output = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        if token_ids_b:
            output += token_ids_b + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_a, token_ids_b=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep) * [0]
        return len(cls + token_ids_a + sep) * [0] + len(token_ids_b + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
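# Illustrative usage (added; commented out because it downloads tokenizer
# files on first run):
#
# tokenizer = MobileBertTokenizerFast.from_pretrained("google/mobilebert-uncased")
# print(tokenizer("hello world")["input_ids"])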
| 19 |
import json
import sys
def format_json_to_md(input_json_file, output_md_file):
    with open(input_json_file, encoding="utf-8") as f:
        results = json.load(f)
    output_md = ["<details>", "<summary>Show updated benchmarks!</summary>", " "]
    for benchmark_name in sorted(results):
        benchmark_res = results[benchmark_name]
        benchmark_file_name = benchmark_name.split("/")[-1]
        output_md.append(f"### Benchmark: {benchmark_file_name}")
        title = "| metric |"
        lines = "|--------|"
        value = "| new / old (diff) |"
        for metric_name in sorted(benchmark_res):
            metric_vals = benchmark_res[metric_name]
            new_val = metric_vals["new"]
            old_val = metric_vals.get("old", None)
            dif_val = metric_vals.get("diff", None)
            val_str = f" {new_val:f}" if isinstance(new_val, (int, float)) else "None"
            if old_val is not None:
                val_str += f" / {old_val:f}" if isinstance(old_val, (int, float)) else "None"
            if dif_val is not None:
                val_str += f" ({dif_val:f})" if isinstance(dif_val, (int, float)) else "None"
            title += " " + metric_name + " |"
            lines += "---|"
            value += val_str + " |"
        output_md += [title, lines, value, " "]
    output_md.append("</details>")
    with open(output_md_file, "w", encoding="utf-8") as f:
        f.writelines("\n".join(output_md))
if __name__ == "__main__":
    input_json_file = sys.argv[1]
    output_md_file = sys.argv[2]
format_json_to_md(input_json_file, output_md_file)
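# Example input (added for illustration): the script expects a JSON file of
# the form {benchmark_name: {metric: {"new": ..., "old": ..., "diff": ...}}},
# e.g. {"benchmarks/load": {"time": {"new": 1.2, "old": 1.5, "diff": -0.3}}},
# and renders one markdown table per benchmark inside a <details> block.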
| 287 | 0 |
import json
import os
from typing import Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
}
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"ctrl": "https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-vocab.json"},
    "merges_file": {"ctrl": "https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-merges.txt"},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "ctrl": 256,
}
CONTROL_CODES = {
"""Pregnancy""": 168629,
"""Christianity""": 7675,
"""Explain""": 106423,
"""Fitness""": 63440,
"""Saving""": 63163,
"""Ask""": 27171,
"""Ass""": 95985,
"""Joke""": 163509,
"""Questions""": 45622,
"""Thoughts""": 49605,
"""Retail""": 52342,
"""Feminism""": 164338,
"""Writing""": 11992,
"""Atheism""": 192263,
"""Netflix""": 48616,
"""Computing""": 39639,
"""Opinion""": 43213,
"""Alone""": 44967,
"""Funny""": 58917,
"""Gaming""": 40358,
"""Human""": 4088,
"""India""": 1331,
"""Joker""": 77138,
"""Diet""": 36206,
"""Legal""": 11859,
"""Norman""": 4939,
"""Tip""": 72689,
"""Weight""": 52343,
"""Movies""": 46273,
"""Running""": 23425,
"""Science""": 2090,
"""Horror""": 37793,
"""Confession""": 60572,
"""Finance""": 12250,
"""Politics""": 16360,
"""Scary""": 191985,
"""Support""": 12654,
"""Technologies""": 32516,
"""Teenage""": 66160,
"""Event""": 32769,
"""Learned""": 67460,
"""Notion""": 182770,
"""Wikipedia""": 37583,
"""Books""": 6665,
"""Extract""": 76050,
"""Confessions""": 102701,
"""Conspiracy""": 75932,
"""Links""": 63674,
"""Narcissus""": 150425,
"""Relationship""": 54766,
"""Relationships""": 134796,
"""Reviews""": 41671,
"""News""": 4256,
"""Translation""": 26820,
"""multilingual""": 128406,
}
def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    pairs = set(pairs)
    return pairs
class CTRLTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    control_codes = CONTROL_CODES

    def __init__(self, vocab_file, merges_file, unk_token="<unk>", **kwargs):
        '''simple docstring'''
        super().__init__(unk_token=unk_token, **kwargs)
        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            merges = merges_handle.read().split("\n")[1:-1]
        merges = [tuple(merge.split()) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}
    @property
    def vocab_size(self):
        '''simple docstring'''
        return len(self.encoder)

    def get_vocab(self):
        '''simple docstring'''
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token):
        '''simple docstring'''
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
        pairs = get_pairs(word)
        if not pairs:
            return token
        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j
                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = "@@ ".join(word)
        word = word[:-4]
        self.cache[token] = word
        return word
    def _tokenize(self, text):
        '''simple docstring'''
        split_tokens = []
        words = re.findall(r"\S+\n?", text)
        for token in words:
            split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens

    def _convert_token_to_id(self, token):
        '''simple docstring'''
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        '''simple docstring'''
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens):
        '''simple docstring'''
        out_string = " ".join(tokens).replace("@@ ", "").strip()
        return out_string
    def save_vocabulary(self, save_directory, filename_prefix=None):
        '''simple docstring'''
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"])
        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!")
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1
        return vocab_file, merge_file
# def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True):
# filtered_tokens = ' '.join(self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens))
# tokens_generated_so_far = re.sub('(@@ )', '', string=filtered_tokens)
# tokens_generated_so_far = re.sub('(@@ ?$)', '', string=tokens_generated_so_far)
# return ''.join(tokens_generated_so_far)
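# Quick illustration (added) of the BPE helper defined above:
#
# >>> sorted(get_pairs(("h", "e", "l", "l", "o")))
# [('e', 'l'), ('h', 'e'), ('l', 'l'), ('l', 'o')]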
| 20 |
def reverse_long_words(sentence: str) -> str:
    return " ".join(
        "".join(word[::-1]) if len(word) > 4 else word for word in sentence.split())
if __name__ == "__main__":
import doctest
doctest.testmod()
print(reverse_long_words("""Hey wollef sroirraw"""))
| 287 | 0 |
import argparse
from collections import defaultdict
def overwrite_file(file, class_name, test_name, correct_line, done_test):
    _id = f"{file}_{class_name}_{test_name}"
    done_test[_id] += 1
    with open(file, "r") as f:
        lines = f.readlines()
    class_regex = f"class {class_name}("
    test_regex = f"{4 * ' '}def {test_name}("
    line_begin_regex = f"{8 * ' '}{correct_line.split()[0]}"
    another_line_begin_regex = f"{16 * ' '}{correct_line.split()[0]}"
    in_class = False
    in_func = False
    in_line = False
    insert_line = False
    count = 0
    spaces = 0
    new_lines = []
    for line in lines:
        if line.startswith(class_regex):
            in_class = True
        elif in_class and line.startswith(test_regex):
            in_func = True
        elif in_class and in_func and (line.startswith(line_begin_regex) or line.startswith(another_line_begin_regex)):
            spaces = len(line.split(correct_line.split()[0])[0])
            count += 1
            if count == done_test[_id]:
                in_line = True
        if in_class and in_func and in_line:
            if ")" not in line:
                continue
            else:
                insert_line = True
        if in_class and in_func and in_line and insert_line:
            new_lines.append(f"{spaces * ' '}{correct_line}")
            in_class = in_func = in_line = insert_line = False
        else:
            new_lines.append(line)
    with open(file, "w") as f:
        for line in new_lines:
            f.write(line)


def main(correct, fail=None):
    if fail is not None:
        with open(fail, "r") as f:
            test_failures = {l.strip() for l in f.readlines()}
    else:
        test_failures = None
    with open(correct, "r") as f:
        correct_lines = f.readlines()
    done_tests = defaultdict(int)
    for line in correct_lines:
        file, class_name, test_name, correct_line = line.split(";")
        if test_failures is None or "::".join([file, class_name, test_name]) in test_failures:
            overwrite_file(file, class_name, test_name, correct_line, done_tests)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--correct_filename", help="filename of tests with expected result")
parser.add_argument("--fail_filename", help="filename of test failures", type=str, default=None)
    args = parser.parse_args()
main(args.correct_filename, args.fail_filename)
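# Example invocation (added for illustration; the script filename below is
# hypothetical). Each line of the corrections file has the form
# "<test file>;<class name>;<test name>;<corrected line>":
#
#   python overwrite_expected_values.py --correct_filename corrections.txt --fail_filename failures.txt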
| 21 |
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from packaging import version
from .. import __version__
from .constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD
from .doc import (
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
copy_func,
replace_return_docstrings,
)
from .generic import (
ContextManagers,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
TensorType,
add_model_info_to_auto_map,
cached_property,
can_return_loss,
expand_dims,
find_labels,
flatten_dict,
infer_framework,
is_jax_tensor,
is_numpy_array,
is_tensor,
is_tf_symbolic_tensor,
is_tf_tensor,
is_torch_device,
is_torch_dtype,
is_torch_tensor,
reshape,
squeeze,
strtobool,
tensor_size,
to_numpy,
to_py_obj,
transpose,
working_or_temp_dir,
)
from .hub import (
CLOUDFRONT_DISTRIB_PREFIX,
DISABLE_TELEMETRY,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
EntryNotFoundError,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
cached_file,
default_cache_path,
define_sagemaker_information,
download_url,
extract_commit_hash,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
has_file,
http_user_agent,
is_offline_mode,
is_remote_url,
move_cache,
send_example_telemetry,
try_to_load_from_cache,
)
from .import_utils import (
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
TORCH_FX_REQUIRED_VERSION,
USE_JAX,
USE_TF,
USE_TORCH,
DummyObject,
OptionalDependencyNotAvailable,
_LazyModule,
ccl_version,
direct_transformers_import,
get_torch_version,
is_accelerate_available,
is_apex_available,
is_bitsandbytes_available,
    is_bs4_available,
is_coloredlogs_available,
is_cython_available,
is_datasets_available,
is_decord_available,
    is_detectron2_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_jieba_available,
is_jumanpp_available,
is_kenlm_available,
is_keras_nlp_available,
is_librosa_available,
is_natten_available,
is_ninja_available,
is_onnx_available,
is_openai_available,
is_optimum_available,
is_pandas_available,
is_peft_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytest_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sacremoses_available,
is_safetensors_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_sudachi_available,
is_tensorflow_probability_available,
is_tensorflow_text_available,
    is_tf2onnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
    is_torch_bf16_available,
    is_torch_bf16_cpu_available,
    is_torch_bf16_gpu_available,
is_torch_compile_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_neuroncore_available,
is_torch_tensorrt_fx_available,
    is_torch_tf32_available,
is_torch_tpu_available,
is_torchaudio_available,
is_torchdistx_available,
is_torchdynamo_available,
is_torchvision_available,
is_training_run_on_sagemaker,
is_vision_available,
requires_backends,
torch_only_method,
)
_lowerCamelCase ="""pytorch_model.bin"""
_lowerCamelCase ="""pytorch_model.bin.index.json"""
_lowerCamelCase ="""adapter_config.json"""
_lowerCamelCase ="""adapter_model.bin"""
_lowerCamelCase ="""adapter_model.safetensors"""
_lowerCamelCase ="""tf_model.h5"""
_lowerCamelCase ="""tf_model.h5.index.json"""
_lowerCamelCase ="""model.ckpt"""
_lowerCamelCase ="""flax_model.msgpack"""
_lowerCamelCase ="""flax_model.msgpack.index.json"""
_lowerCamelCase ="""model.safetensors"""
_lowerCamelCase ="""model.safetensors.index.json"""
_lowerCamelCase ="""config.json"""
_lowerCamelCase ="""preprocessor_config.json"""
_lowerCamelCase =FEATURE_EXTRACTOR_NAME
_lowerCamelCase ="""generation_config.json"""
_lowerCamelCase ="""modelcard.json"""
_lowerCamelCase ="""▁"""
_lowerCamelCase =SENTENCEPIECE_UNDERLINE # Kept for backward compatibility
_lowerCamelCase =[
[[0, 1, 0, 1], [1, 0, 0, 1]]
] * 2 # Needs to have 0s and 1s only since XLM uses it for langs too.
_lowerCamelCase =[[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]]
_lowerCamelCase =[[1, 1, 1, 1, 1], [1, 1, 1, 0, 0], [0, 0, 0, 1, 1]]
def check_min_version(min_version):
    if version.parse(__version__) < version.parse(min_version):
        if "dev" in min_version:
            error_message = (
                "This example requires a source install from HuggingFace Transformers (see "
                "`https://huggingface.co/docs/transformers/installation#install-from-source`),"
            )
        else:
            error_message = f"This example requires a minimum version of {min_version},"
error_message += F''' but the version found is {__version__}.\n'''
raise ImportError(
error_message
+ """Check out https://github.com/huggingface/transformers/tree/main/examples#important-note for the examples corresponding to other """
"""versions of HuggingFace Transformers.""" )
| 287 | 0 |
'''simple docstring'''
import warnings
from typing import Dict
import numpy as np
from ..utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
def sigmoid(_outputs):
    '''simple docstring'''
    return 1.0 / (1.0 + np.exp(-_outputs))


def softmax(_outputs):
    '''simple docstring'''
    maxes = np.max(_outputs, axis=-1, keepdims=True)
    shifted_exp = np.exp(_outputs - maxes)
    return shifted_exp / shifted_exp.sum(axis=-1, keepdims=True)
class ClassificationFunction(ExplicitEnum):
    SIGMOID = "sigmoid"
    SOFTMAX = "softmax"
    NONE = "none"
@add_end_docstrings(
lowerCAmelCase_ , R"""
return_all_scores (`bool`, *optional*, defaults to `False`):
Whether to return all prediction scores or just the one of the predicted class.
function_to_apply (`str`, *optional*, defaults to `\"default\"`):
The function to apply to the model outputs in order to retrieve the scores. Accepts four different values:
- `\"default\"`: if the model has a single label, will apply the sigmoid function on the output. If the model
has several labels, will apply the softmax function on the output.
- `\"sigmoid\"`: Applies the sigmoid function on the output.
- `\"softmax\"`: Applies the softmax function on the output.
- `\"none\"`: Does not apply any function on the output.
""" , )
class TextClassificationPipeline(Pipeline):
    return_all_scores = False
    function_to_apply = ClassificationFunction.NONE
def __init__( self : Any , **snake_case_ : int ):
super().__init__(**snake_case_ )
self.check_model_type(
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if self.framework == "tf"
else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING )
    def _sanitize_parameters(self, return_all_scores=None, function_to_apply=None, top_k="", **tokenizer_kwargs):
        # Using "" as default argument because we're going to use `top_k=None` in user code to declare
        # "No top_k"
        preprocess_params = tokenizer_kwargs
        postprocess_params = {}
        if hasattr(self.model.config, "return_all_scores") and return_all_scores is None:
            return_all_scores = self.model.config.return_all_scores
        if isinstance(top_k, int) or top_k is None:
            postprocess_params["top_k"] = top_k
            postprocess_params["_legacy"] = False
        elif return_all_scores is not None:
            warnings.warn(
                "`return_all_scores` is now deprecated, if want a similar functionality use `top_k=None` instead of"
                " `return_all_scores=True` or `top_k=1` instead of `return_all_scores=False`.",
                UserWarning,
            )
            if return_all_scores:
                postprocess_params["top_k"] = None
            else:
                postprocess_params["top_k"] = 1
        if isinstance(function_to_apply, str):
            function_to_apply = ClassificationFunction[function_to_apply.upper()]
        if function_to_apply is not None:
            postprocess_params["function_to_apply"] = function_to_apply
        return preprocess_params, {}, postprocess_params
    def __call__(self, *args, **kwargs):
        result = super().__call__(*args, **kwargs)
        # TODO try and retrieve it in a nicer way from _sanitize_parameters.
        _legacy = "top_k" not in kwargs
        if isinstance(args[0], str) and _legacy:
            # This pipeline is odd, and return a list when single item is run
            return [result]
        else:
            return result
    def preprocess(self, inputs, **tokenizer_kwargs):
        return_tensors = self.framework
        if isinstance(inputs, dict):
            return self.tokenizer(**inputs, return_tensors=return_tensors, **tokenizer_kwargs)
        elif isinstance(inputs, list) and len(inputs) == 1 and isinstance(inputs[0], list) and len(inputs[0]) == 2:
            # It used to be valid to use a list of list of list for text pairs, keeping this path for BC
            return self.tokenizer(
                text=inputs[0][0], text_pair=inputs[0][1], return_tensors=return_tensors, **tokenizer_kwargs)
        elif isinstance(inputs, list):
            # This is likely an invalid usage of the pipeline attempting to pass text pairs.
            raise ValueError(
                "The pipeline received invalid inputs, if you are trying to send text pairs, you can try to send a"
                ' dictionary `{"text": "My text", "text_pair": "My pair"}` in order to send a text pair.')
        return self.tokenizer(inputs, return_tensors=return_tensors, **tokenizer_kwargs)
    def _forward(self, model_inputs):
        return self.model(**model_inputs)
    def postprocess(self, model_outputs, function_to_apply=None, top_k=1, _legacy=True):
        # `_legacy` is used to determine if we're running the naked pipeline and in backward
        # compatibility mode, or if running the pipeline with `pipeline(..., top_k=1)` we're running
        # the more natural result containing the list.
        # Default value before `set_parameters`
        if function_to_apply is None:
            if self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels == 1:
                function_to_apply = ClassificationFunction.SIGMOID
            elif self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels > 1:
                function_to_apply = ClassificationFunction.SOFTMAX
            elif hasattr(self.model.config, "function_to_apply") and function_to_apply is None:
                function_to_apply = self.model.config.function_to_apply
            else:
                function_to_apply = ClassificationFunction.NONE
        outputs = model_outputs["logits"][0]
        outputs = outputs.numpy()
        if function_to_apply == ClassificationFunction.SIGMOID:
            scores = sigmoid(outputs)
        elif function_to_apply == ClassificationFunction.SOFTMAX:
            scores = softmax(outputs)
        elif function_to_apply == ClassificationFunction.NONE:
            scores = outputs
        else:
            raise ValueError(f"Unrecognized `function_to_apply` argument: {function_to_apply}")
        if top_k == 1 and _legacy:
            return {"label": self.model.config.id2label[scores.argmax().item()], "score": scores.max().item()}
        dict_scores = [
            {"label": self.model.config.id2label[i], "score": score.item()} for i, score in enumerate(scores)
        ]
        if not _legacy:
            dict_scores.sort(key=lambda x: x["score"], reverse=True)
            if top_k is not None:
                dict_scores = dict_scores[:top_k]
        return dict_scores
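# Illustrative usage of this pipeline (added; kept commented because running
# it downloads a default checkpoint):
#
# from transformers import pipeline
# classifier = pipeline("text-classification")
# classifier("I love this movie!")  # e.g. [{'label': 'POSITIVE', 'score': 0.99...}]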
| 22 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""camembert-base""": """https://huggingface.co/camembert-base/resolve/main/config.json""",
"""umberto-commoncrawl-cased-v1""": (
"""https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json"""
),
"""umberto-wikipedia-uncased-v1""": (
"""https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json"""
),
}
class CamembertConfig(PretrainedConfig):
    model_type = "camembert"

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute", use_cache=True, classifier_dropout=None, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class CamembertOnnxConfig(OnnxConfig):
    @property
    def inputs(self):
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
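# Illustrative usage (added): instantiating with defaults mirrors the
# `camembert-base` hyper-parameters:
#
# config = CamembertConfig()
# assert config.hidden_size == 768 and config.num_hidden_layers == 12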
| 287 | 0 |
'''simple docstring'''
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
EXAMPLE_DOC_STRING = "\n    Examples:\n    ```py\n    >>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline\n    >>> import torch\n\n    >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\"kandinsky-community/kandinsky-2-2-prior\")\n    >>> pipe_prior.to(\"cuda\")\n    >>> prompt = \"red cat, 4k photo\"\n    >>> out = pipe_prior(prompt)\n    >>> image_emb = out.image_embeds\n    >>> zero_image_emb = out.negative_image_embeds\n    >>> pipe = KandinskyV22Pipeline.from_pretrained(\"kandinsky-community/kandinsky-2-2-decoder\")\n    >>> pipe.to(\"cuda\")\n    >>> image = pipe(\n    ...     image_embeds=image_emb,\n    ...     negative_image_embeds=zero_image_emb,\n    ...     height=768,\n    ...     width=768,\n    ...     num_inference_steps=50,\n    ... ).images\n    >>> image[0].save(\"cat.png\")\n    ```\n"
def downscale_height_and_width(height, width, scale_factor=8):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
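# Worked example (added): with the default scale_factor of 8, a 768x768
# request maps through the factor-64 division back to a multiple of 8:
# downscale_height_and_width(768, 768) == (96, 96)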
class KandinskyV22Pipeline(DiffusionPipeline):
    """simple docstring"""

    def __init__(self, unet: UNet2DConditionModel, scheduler: DDPMScheduler, movq: VQModel):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler, movq=movq)
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)
    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)
        latents = latents * scheduler.init_noise_sigma
        return latents
    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")
        device = torch.device(f"cuda:{gpu_id}")
        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)
    def enable_model_cpu_offload(self, gpu_id=0):
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")
        device = torch.device(f"cuda:{gpu_id}")
        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)
        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)
        # We'll offload the last model manually.
        self.final_offload_hook = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
@torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(self, image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]], negative_image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]], height: int = 512, width: int = 512, num_inference_steps: int = 100, guidance_scale: float = 4.0, num_images_per_prompt: int = 1, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True):
        device = self._execution_device
        do_classifier_free_guidance = guidance_scale > 1.0
        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        batch_size = image_embeds.shape[0] * num_images_per_prompt
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)
        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(dtype=self.unet.dtype, device=device)
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps = self.scheduler.timesteps
        num_channels_latents = self.unet.config.in_channels
        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)
        # create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width), image_embeds.dtype, device, generator, latents, self.scheduler
        )
        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            added_cond_kwargs = {"image_embeds": image_embeds}
            noise_pred = self.unet(
                sample=latent_model_input, timestep=t, encoder_hidden_states=None, added_cond_kwargs=added_cond_kwargs, return_dict=False
            )[0]
            if do_classifier_free_guidance:
                # the unet predicts noise and variance stacked along the channel axis
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                # classifier-free guidance: extrapolate from the unconditional
                # prediction toward the conditional one by guidance_scale
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)
            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, generator=generator)[0]
        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]
        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")
        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
| 23 |
ENERGY_CONVERSION = {
    "joule": 1.0,
    "kilojoule": 1_000,
    "megajoule": 1_000_000,
    "gigajoule": 1_000_000_000,
    "wattsecond": 1.0,
    "watthour": 3_600,
    "kilowatthour": 3_600_000,
    "newtonmeter": 1.0,
    "calorie_nutr": 4_186.8,
    "kilocalorie_nutr": 4_186_800.00,
    "electronvolt": 1.602_176_634e-19,
    "britishthermalunit_it": 1_055.055_85,
    "footpound": 1.355_818,
}


def energy_conversion(from_type: str, to_type: str, value: float) -> float:
    if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
        msg = (
            f"Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"
            f"Valid values are: {', '.join(ENERGY_CONVERSION)}"
        )
        raise ValueError(msg)
    return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
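# Illustrative checks (added): the conversion is value * from_factor / to_factor,
# so units round-trip exactly up to float rounding.
assert energy_conversion("kilowatthour", "joule", 1.0) == 3_600_000.0
assert abs(energy_conversion("joule", "kilowatthour", 3_600_000.0) - 1.0) < 1e-12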
| 287 | 0 |
import dataclasses
import json
import sys
import types
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints
import yaml
snake_case_ = NewType('DataClass', Any)
snake_case_ = NewType('DataClassType', Any)
def string_to_bool(v):
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise ArgumentTypeError(
            f"Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive)."
        )
def make_choice_type_function(choices: list) -> Callable[[str], Any]:
    str_to_choice = {str(choice): choice for choice in choices}
    return lambda arg: str_to_choice.get(arg, arg)
def HfArg(
    *,
    aliases: Union[str, List[str]] = None,
    help: str = None,
    default: Any = dataclasses.MISSING,
    default_factory: Callable[[], Any] = dataclasses.MISSING,
    metadata: dict = None,
    **kwargs,
) -> dataclasses.Field:
    if metadata is None:
        # Important, don't use as default param in function signature because dict is mutable and shared across function calls
        metadata = {}
    if aliases is not None:
        metadata["aliases"] = aliases
    if help is not None:
        metadata["help"] = help
    return dataclasses.field(metadata=metadata, default=default, default_factory=default_factory, **kwargs)
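# A minimal sketch (added; the dataclass below is hypothetical) of how HfArg is
# meant to be used: aliases and help text land in the field metadata, which the
# parser defined next turns into argparse options.
@dataclasses.dataclass
class _ExampleArguments:
    learning_rate: float = HfArg(aliases=["--lr"], help="Peak learning rate.", default=5e-5)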
class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ):
A_ : Iterable[DataClassType]
def __init__(self : Tuple , a__ : Union[DataClassType, Iterable[DataClassType]] , **a__ : Optional[Any] ):
"""simple docstring"""
if "formatter_class" not in kwargs:
__snake_case = ArgumentDefaultsHelpFormatter
super().__init__(**a__ )
if dataclasses.is_dataclass(a__ ):
__snake_case = [dataclass_types]
__snake_case = list(a__ )
for dtype in self.dataclass_types:
self._add_dataclass_arguments(a__ )
@staticmethod
def a (a__ : ArgumentParser , a__ : dataclasses.Field ):
"""simple docstring"""
__snake_case = f"""--{field.name}"""
__snake_case = field.metadata.copy()
# field.metadata is not used at all by Data Classes,
# it is provided as a third-party extension mechanism.
if isinstance(field.type , a__ ):
raise RuntimeError(
'''Unresolved type detected, which should have been done with the help of '''
'''`typing.get_type_hints` method by default''' )
__snake_case = kwargs.pop('''aliases''' , [] )
if isinstance(a__ , a__ ):
__snake_case = [aliases]
__snake_case = getattr(field.type , '''__origin__''' , field.type )
if origin_type is Union or (hasattr(a__ , '''UnionType''' ) and isinstance(a__ , types.UnionType )):
if str not in field.type.__args__ and (
len(field.type.__args__ ) != 2 or type(a__ ) not in field.type.__args__
):
raise ValueError(
'''Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because'''
''' the argument parser only supports one type per argument.'''
f""" Problem encountered in field '{field.name}'.""" )
if type(a__ ) not in field.type.__args__:
# filter `str` in Union
__snake_case = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
__snake_case = getattr(field.type , '''__origin__''' , field.type )
elif bool not in field.type.__args__:
# filter `NoneType` in Union (except for `Union[bool, NoneType]`)
__snake_case = (
field.type.__args__[0] if isinstance(a__ , field.type.__args__[1] ) else field.type.__args__[1]
)
__snake_case = getattr(field.type , '''__origin__''' , field.type )
# A variable to store kwargs for a boolean field, if needed
# so that we can init a `no_*` complement argument (see below)
__snake_case = {}
if origin_type is Literal or (isinstance(field.type , a__ ) and issubclass(field.type , a__ )):
if origin_type is Literal:
__snake_case = field.type.__args__
else:
__snake_case = [x.value for x in field.type]
__snake_case = make_choice_type_function(kwargs['''choices'''] )
if field.default is not dataclasses.MISSING:
__snake_case = field.default
else:
__snake_case = True
elif field.type is bool or field.type == Optional[bool]:
# Copy the currect kwargs to use to instantiate a `no_*` complement argument below.
# We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
__snake_case = copy(a__ )
# Hack because type=bool in argparse does not behave as we want.
__snake_case = string_to_bool
if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
# Default value is False if we have no default when of type bool.
__snake_case = False if field.default is dataclasses.MISSING else field.default
# This is the value that will get picked if we don't include --field_name in any way
__snake_case = default
# This tells argparse we accept 0 or 1 value after --field_name
__snake_case = '''?'''
# This is the value that will get picked if we do --field_name (without value)
__snake_case = True
elif isclass(a__ ) and issubclass(a__ , a__ ):
__snake_case = field.type.__args__[0]
__snake_case = '''+'''
if field.default_factory is not dataclasses.MISSING:
__snake_case = field.default_factory()
elif field.default is dataclasses.MISSING:
__snake_case = True
else:
__snake_case = field.type
if field.default is not dataclasses.MISSING:
__snake_case = field.default
elif field.default_factory is not dataclasses.MISSING:
__snake_case = field.default_factory()
else:
__snake_case = True
parser.add_argument(a__ , *a__ , **a__ )
# Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
# Order is important for arguments with the same destination!
# We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
# here and we do not need those changes/additional keys.
if field.default is True and (field.type is bool or field.type == Optional[bool]):
__snake_case = False
parser.add_argument(f"""--no_{field.name}""" , action='''store_false''' , dest=field.name , **a__ )
def a (self : int , a__ : DataClassType ):
"""simple docstring"""
if hasattr(a__ , '''_argument_group_name''' ):
__snake_case = self.add_argument_group(dtype._argument_group_name )
else:
__snake_case = self
try:
__snake_case = get_type_hints(a__ )
except NameError:
raise RuntimeError(
f"""Type resolution failed for {dtype}. Try declaring the class in global scope or """
'''removing line of `from __future__ import annotations` which opts in Postponed '''
'''Evaluation of Annotations (PEP 563)''' )
except TypeError as ex:
# Remove this block when we drop Python 3.9 support
if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(a__ ):
__snake_case = '''.'''.join(map(a__ , sys.version_info[:3] ) )
raise RuntimeError(
f"""Type resolution failed for {dtype} on Python {python_version}. Try removing """
'''line of `from __future__ import annotations` which opts in union types as '''
'''`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To '''
'''support Python versions that lower than 3.10, you need to use '''
'''`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of '''
'''`X | None`.''' ) from ex
raise
for field in dataclasses.fields(a__ ):
if not field.init:
continue
__snake_case = type_hints[field.name]
self._parse_dataclass_field(a__ , a__ )
def a (self : Union[str, Any] , a__ : List[Any]=None , a__ : List[Any]=False , a__ : str=True , a__ : List[Any]=None , a__ : Tuple=None , ):
"""simple docstring"""
if args_file_flag or args_filename or (look_for_args_file and len(sys.argv )):
__snake_case = []
if args_filename:
args_files.append(Path(a__ ) )
elif look_for_args_file and len(sys.argv ):
args_files.append(Path(sys.argv[0] ).with_suffix('''.args''' ) )
# args files specified via command line flag should overwrite default args files so we add them last
if args_file_flag:
# Create special parser just to extract the args_file_flag values
__snake_case = ArgumentParser()
args_file_parser.add_argument(a__ , type=a__ , action='''append''' )
# Use only remaining args for further parsing (remove the args_file_flag)
__snake_case , __snake_case = args_file_parser.parse_known_args(args=a__ )
__snake_case = vars(a__ ).get(args_file_flag.lstrip('''-''' ) , a__ )
if cmd_args_file_paths:
args_files.extend([Path(a__ ) for p in cmd_args_file_paths] )
__snake_case = []
for args_file in args_files:
if args_file.exists():
file_args += args_file.read_text().split()
# in case of duplicate arguments the last one has precedence
# args specified via the command line should overwrite args from files, so we add them last
__snake_case = file_args + args if args is not None else file_args + sys.argv[1:]
__snake_case , __snake_case = self.parse_known_args(args=a__ )
__snake_case = []
for dtype in self.dataclass_types:
__snake_case = {f.name for f in dataclasses.fields(a__ ) if f.init}
__snake_case = {k: v for k, v in vars(a__ ).items() if k in keys}
for k in keys:
delattr(a__ , a__ )
__snake_case = dtype(**a__ )
outputs.append(a__ )
if len(namespace.__dict__ ) > 0:
# additional namespace.
outputs.append(a__ )
if return_remaining_strings:
return (*outputs, remaining_args)
else:
if remaining_args:
raise ValueError(f"""Some specified arguments are not used by the HfArgumentParser: {remaining_args}""" )
return (*outputs,)
def a (self : Dict , a__ : Dict[str, Any] , a__ : bool = False ):
"""simple docstring"""
__snake_case = set(args.keys() )
__snake_case = []
for dtype in self.dataclass_types:
__snake_case = {f.name for f in dataclasses.fields(a__ ) if f.init}
__snake_case = {k: v for k, v in args.items() if k in keys}
unused_keys.difference_update(inputs.keys() )
__snake_case = dtype(**a__ )
outputs.append(a__ )
if not allow_extra_keys and unused_keys:
raise ValueError(f"""Some keys are not used by the HfArgumentParser: {sorted(a__ )}""" )
return tuple(a__ )
def a (self : Union[str, Any] , a__ : str , a__ : bool = False ):
"""simple docstring"""
with open(Path(a__ ) , encoding='''utf-8''' ) as open_json_file:
__snake_case = json.loads(open_json_file.read() )
__snake_case = self.parse_dict(a__ , allow_extra_keys=a__ )
return tuple(a__ )
def a (self : Union[str, Any] , a__ : str , a__ : bool = False ):
"""simple docstring"""
__snake_case = self.parse_dict(yaml.safe_load(Path(a__ ).read_text() ) , allow_extra_keys=a__ )
return tuple(a__ )
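# Usage sketch (added): with readable names the class above is
# transformers.HfArgumentParser, and the typical flow looks like the commented
# example below. _TrainingArgs is a hypothetical dataclass and the args list
# stands in for sys.argv[1:].
#
#   @dataclasses.dataclass
#   class _TrainingArgs:
#       epochs: int = 3
#       use_fp16: bool = False
#
#   parser = HfArgumentParser(_TrainingArgs)
#   (training_args,) = parser.parse_args_into_dataclasses(args=["--epochs", "5", "--use_fp16"])
#   assert training_args.epochs == 5 and training_args.use_fp16 is True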
| 24 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class AutomaticSpeechRecognition(TaskTemplate):
    task: str = field(default="automatic-speech-recognition", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"audio": Audio()})
    label_schema: ClassVar[Features] = Features({"transcription": Value("string")})
    audio_column: str = "audio"
    transcription_column: str = "transcription"

    def align_with_features(self, features):
        if self.audio_column not in features:
            raise ValueError(f"Column {self.audio_column} is not present in features.")
        if not isinstance(features[self.audio_column], Audio):
            raise ValueError(f"Column {self.audio_column} is not an Audio type.")
        task_template = copy.deepcopy(self)
        input_schema = self.input_schema.copy()
        input_schema["audio"] = features[self.audio_column]
        task_template.__dict__["input_schema"] = input_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.audio_column: "audio", self.transcription_column: "transcription"}
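# Illustrative check (added): with the default column names the template maps
# dataset columns onto the task schema one-to-one.
assert AutomaticSpeechRecognition().column_mapping == {"audio": "audio", "transcription": "transcription"}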
| 287 | 0 |
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..bit import BitConfig
UpperCAmelCase__ : Optional[int] = logging.get_logger(__name__)
UpperCAmelCase__ : Optional[Any] = {
'Intel/dpt-large': 'https://huggingface.co/Intel/dpt-large/resolve/main/config.json',
# See all DPT models at https://huggingface.co/models?filter=dpt
}
class lowerCAmelCase_ (a__ ):
"""simple docstring"""
__UpperCamelCase : str = '''dpt'''
def __init__(self , SCREAMING_SNAKE_CASE__=7_68 , SCREAMING_SNAKE_CASE__=12 , SCREAMING_SNAKE_CASE__=12 , SCREAMING_SNAKE_CASE__=30_72 , SCREAMING_SNAKE_CASE__="gelu" , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__=0.02 , SCREAMING_SNAKE_CASE__=1E-12 , SCREAMING_SNAKE_CASE__=3_84 , SCREAMING_SNAKE_CASE__=16 , SCREAMING_SNAKE_CASE__=3 , SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=[2, 5, 8, 11] , SCREAMING_SNAKE_CASE__="project" , SCREAMING_SNAKE_CASE__=[4, 2, 1, 0.5] , SCREAMING_SNAKE_CASE__=[96, 1_92, 3_84, 7_68] , SCREAMING_SNAKE_CASE__=2_56 , SCREAMING_SNAKE_CASE__=-1 , SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=0.4 , SCREAMING_SNAKE_CASE__=2_55 , SCREAMING_SNAKE_CASE__=0.1 , SCREAMING_SNAKE_CASE__=[1, 10_24, 24, 24] , SCREAMING_SNAKE_CASE__=[0, 1] , SCREAMING_SNAKE_CASE__=None , **SCREAMING_SNAKE_CASE__ , ) -> Union[str, Any]:
"""simple docstring"""
super().__init__(**SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Tuple = hidden_size
SCREAMING_SNAKE_CASE__ : Optional[int] = is_hybrid
if self.is_hybrid:
if backbone_config is None:
logger.info("""Initializing the config with a `BiT` backbone.""" )
SCREAMING_SNAKE_CASE__ : Any = {
"""global_padding""": """same""",
"""layer_type""": """bottleneck""",
"""depths""": [3, 4, 9],
"""out_features""": ["""stage1""", """stage2""", """stage3"""],
"""embedding_dynamic_padding""": True,
}
SCREAMING_SNAKE_CASE__ : Optional[Any] = BitConfig(**SCREAMING_SNAKE_CASE__ )
elif isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
logger.info("""Initializing the config with a `BiT` backbone.""" )
SCREAMING_SNAKE_CASE__ : str = BitConfig(**SCREAMING_SNAKE_CASE__ )
elif isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
SCREAMING_SNAKE_CASE__ : List[Any] = backbone_config
else:
raise ValueError(
F'''backbone_config must be a dictionary or a `PretrainedConfig`, got {backbone_config.__class__}.''' )
SCREAMING_SNAKE_CASE__ : Dict = backbone_featmap_shape
SCREAMING_SNAKE_CASE__ : List[Any] = neck_ignore_stages
if readout_type != "project":
raise ValueError("""Readout type must be 'project' when using `DPT-hybrid` mode.""" )
else:
SCREAMING_SNAKE_CASE__ : str = None
SCREAMING_SNAKE_CASE__ : int = None
SCREAMING_SNAKE_CASE__ : List[Any] = []
SCREAMING_SNAKE_CASE__ : Any = num_hidden_layers
SCREAMING_SNAKE_CASE__ : List[str] = num_attention_heads
SCREAMING_SNAKE_CASE__ : int = intermediate_size
SCREAMING_SNAKE_CASE__ : Union[str, Any] = hidden_act
SCREAMING_SNAKE_CASE__ : List[str] = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ : Tuple = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ : Optional[Any] = initializer_range
SCREAMING_SNAKE_CASE__ : List[Any] = layer_norm_eps
SCREAMING_SNAKE_CASE__ : Tuple = image_size
SCREAMING_SNAKE_CASE__ : Tuple = patch_size
SCREAMING_SNAKE_CASE__ : Tuple = num_channels
SCREAMING_SNAKE_CASE__ : Any = qkv_bias
SCREAMING_SNAKE_CASE__ : List[Any] = backbone_out_indices
if readout_type not in ["ignore", "add", "project"]:
raise ValueError("""Readout_type must be one of ['ignore', 'add', 'project']""" )
SCREAMING_SNAKE_CASE__ : Dict = readout_type
SCREAMING_SNAKE_CASE__ : Optional[int] = reassemble_factors
SCREAMING_SNAKE_CASE__ : List[str] = neck_hidden_sizes
SCREAMING_SNAKE_CASE__ : Dict = fusion_hidden_size
SCREAMING_SNAKE_CASE__ : int = head_in_index
SCREAMING_SNAKE_CASE__ : Union[str, Any] = use_batch_norm_in_fusion_residual
# auxiliary head attributes (semantic segmentation)
SCREAMING_SNAKE_CASE__ : Tuple = use_auxiliary_head
SCREAMING_SNAKE_CASE__ : List[Any] = auxiliary_loss_weight
SCREAMING_SNAKE_CASE__ : Tuple = semantic_loss_ignore_index
SCREAMING_SNAKE_CASE__ : List[str] = semantic_classifier_dropout
def __magic_name__ (self ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Any = copy.deepcopy(self.__dict__ )
if output["backbone_config"] is not None:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.backbone_config.to_dict()
SCREAMING_SNAKE_CASE__ : str = self.__class__.model_type
return output
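# Note (added): when the config above is instantiated with is_hybrid=True and no
# explicit backbone_config, __init__ builds a default three-stage BiT backbone
# (depths [3, 4, 9], bottleneck layers, same padding) and requires
# readout_type="project"; in the pure-ViT case the backbone fields are nulled
# out. to_dict() then re-serializes the nested backbone config on the way out.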
| 25 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCamelCase =logging.get_logger(__name__)
_lowerCamelCase ={
"""BridgeTower/bridgetower-base""": """https://huggingface.co/BridgeTower/bridgetower-base/blob/main/config.json""",
"""BridgeTower/bridgetower-base-itm-mlm""": (
"""https://huggingface.co/BridgeTower/bridgetower-base-itm-mlm/blob/main/config.json"""
),
}
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : Dict = """bridgetower_vision_model"""
def __init__( self , __magic_name__=7_6_8 , __magic_name__=1_2 , __magic_name__=3 , __magic_name__=1_6 , __magic_name__=2_8_8 , __magic_name__=1 , __magic_name__=1e-05 , __magic_name__=False , __magic_name__=True , __magic_name__=False , **__magic_name__ , ):
super().__init__(**__magic_name__ )
lowerCamelCase : Dict = hidden_size
lowerCamelCase : str = num_hidden_layers
lowerCamelCase : Optional[int] = num_channels
lowerCamelCase : List[str] = patch_size
lowerCamelCase : Tuple = image_size
lowerCamelCase : Any = initializer_factor
lowerCamelCase : Tuple = layer_norm_eps
lowerCamelCase : Tuple = stop_gradient
lowerCamelCase : Optional[int] = share_layernorm
lowerCamelCase : str = remove_last_layer
@classmethod
def UpperCamelCase__ ( cls , __magic_name__ , **__magic_name__ ):
lowerCamelCase , lowerCamelCase : int = cls.get_config_dict(__magic_name__ , **__magic_name__ )
if config_dict.get("""model_type""" ) == "bridgetower":
lowerCamelCase : str = config_dict["""text_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(__magic_name__ , **__magic_name__ )
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : Union[str, Any] = """bridgetower_text_model"""
def __init__( self , __magic_name__=5_0_2_6_5 , __magic_name__=7_6_8 , __magic_name__=1_2 , __magic_name__=1_2 , __magic_name__=1 , __magic_name__=3_0_7_2 , __magic_name__="gelu" , __magic_name__=0.1 , __magic_name__=0.1 , __magic_name__=5_1_4 , __magic_name__=1 , __magic_name__=1e-05 , __magic_name__=1 , __magic_name__=0 , __magic_name__=2 , __magic_name__="absolute" , __magic_name__=True , **__magic_name__ , ):
super().__init__(**__magic_name__ )
lowerCamelCase : int = vocab_size
lowerCamelCase : int = hidden_size
lowerCamelCase : Any = num_hidden_layers
lowerCamelCase : Union[str, Any] = num_attention_heads
lowerCamelCase : Tuple = hidden_act
lowerCamelCase : Optional[int] = initializer_factor
lowerCamelCase : Any = intermediate_size
lowerCamelCase : List[str] = hidden_dropout_prob
lowerCamelCase : Dict = attention_probs_dropout_prob
lowerCamelCase : str = max_position_embeddings
lowerCamelCase : Union[str, Any] = type_vocab_size
lowerCamelCase : Optional[int] = layer_norm_eps
lowerCamelCase : Optional[int] = position_embedding_type
lowerCamelCase : List[str] = use_cache
lowerCamelCase : List[str] = pad_token_id
lowerCamelCase : List[str] = bos_token_id
lowerCamelCase : Optional[int] = eos_token_id
@classmethod
def UpperCamelCase__ ( cls , __magic_name__ , **__magic_name__ ):
lowerCamelCase , lowerCamelCase : int = cls.get_config_dict(__magic_name__ , **__magic_name__ )
if config_dict.get("""model_type""" ) == "bridgetower":
lowerCamelCase : Optional[int] = config_dict["""text_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(__magic_name__ , **__magic_name__ )
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : Dict = """bridgetower"""
def __init__( self , __magic_name__=True , __magic_name__="gelu" , __magic_name__=7_6_8 , __magic_name__=1 , __magic_name__=1e-05 , __magic_name__=False , __magic_name__="add" , __magic_name__=1_2 , __magic_name__=6 , __magic_name__=False , __magic_name__=False , __magic_name__=None , __magic_name__=None , **__magic_name__ , ):
# TODO: remove this once the Hub files are updated.
lowerCamelCase : int = kwargs.pop("""text_config_dict""" , __magic_name__ )
lowerCamelCase : str = kwargs.pop("""vision_config_dict""" , __magic_name__ )
super().__init__(**__magic_name__ )
lowerCamelCase : str = share_cross_modal_transformer_layers
lowerCamelCase : Union[str, Any] = hidden_act
lowerCamelCase : str = hidden_size
lowerCamelCase : Tuple = initializer_factor
lowerCamelCase : List[str] = layer_norm_eps
lowerCamelCase : int = share_link_tower_layers
lowerCamelCase : List[Any] = link_tower_type
lowerCamelCase : Tuple = num_attention_heads
lowerCamelCase : int = num_hidden_layers
lowerCamelCase : Union[str, Any] = tie_word_embeddings
lowerCamelCase : Tuple = init_layernorm_from_vision_encoder
if text_config is None:
lowerCamelCase : Any = {}
logger.info("""`text_config` is `None`. Initializing the `BridgeTowerTextConfig` with default values.""" )
if vision_config is None:
lowerCamelCase : int = {}
logger.info("""`vision_config` is `None`. Initializing the `BridgeTowerVisionConfig` with default values.""" )
lowerCamelCase : Any = BridgeTowerTextConfig(**__magic_name__ )
lowerCamelCase : Optional[Any] = BridgeTowerVisionConfig(**__magic_name__ )
@classmethod
def UpperCamelCase__ ( cls , __magic_name__ , __magic_name__ , **__magic_name__ ):
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **__magic_name__ )
def UpperCamelCase__ ( self ):
lowerCamelCase : str = copy.deepcopy(self.__dict__ )
lowerCamelCase : int = self.text_config.to_dict()
lowerCamelCase : Dict = self.vision_config.to_dict()
lowerCamelCase : List[str] = self.__class__.model_type
return output
| 287 | 0 |
def remove_digit(num: int) -> int:
    """Return the largest number obtainable by deleting exactly one digit."""
    if not isinstance(num, int):
        raise TypeError("only integers accepted as input")
    num_str = str(abs(num))
    num_transpositions = [list(num_str) for _ in range(len(num_str))]
    for index in range(len(num_str)):
        num_transpositions[index].pop(index)
    return max(
        int("".join(list(transposition))) for transposition in num_transpositions
    )
if __name__ == "__main__":
__import__("doctest").testmod()
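# Worked examples (added): deleting one digit of 152 can give 52, 12 or 15, so
# the maximum is 52; the sign is dropped via abs() first, hence -290 -> 90.
assert remove_digit(152) == 52
assert remove_digit(-290) == 90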
| 26 |
def solution(limit: int = 1_000_000) -> int:
    primes = set(range(3, limit, 2))
    primes.add(2)
    for p in range(3, limit, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, limit, p)))
    phi = [float(n) for n in range(limit + 1)]
    for p in primes:
        for n in range(p, limit + 1, p):
            phi[n] *= 1 - 1 / p
    return int(sum(phi[2:]))
if __name__ == "__main__":
print(f'''{solution() = }''')
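# Why this works (added): the sieve leaves phi[n] = n * prod_{p | n}(1 - 1/p),
# Euler's totient, and sum(phi[2:]) counts the reduced proper fractions with
# denominator <= limit (Project Euler 72 at the default limit). For limit = 8
# the totients 1, 2, 2, 4, 2, 6, 4 for n = 2..8 sum to 21.
assert solution(8) == 21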
| 287 | 0 |
'''simple docstring'''
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class OptimizerTester(unittest.TestCase):
    def test_accelerated_optimizer_pickling(self):
        model = torch.nn.Linear(10, 10)
        optimizer = torch.optim.SGD(model.parameters(), 0.1)
        accelerator = Accelerator()
        optimizer = accelerator.prepare(optimizer)
        try:
            pickle.loads(pickle.dumps(optimizer))
        except Exception as e:
            self.fail(f"Accelerated optimizer pickling failed with {e}")
        AcceleratorState._reset_state()
| 27 |
from __future__ import annotations
import collections
import pprint
from pathlib import Path
def signature(word: str) -> str:
    return "".join(sorted(word))


def anagram(my_word: str) -> list[str]:
    return word_by_signature[signature(my_word)]


data = Path(__file__).parent.joinpath("words.txt").read_text(encoding="utf-8")
word_list = sorted({word.strip().lower() for word in data.splitlines()})
word_by_signature = collections.defaultdict(list)
for word in word_list:
    word_by_signature[signature(word)].append(word)

if __name__ == "__main__":
    all_anagrams = {word: anagram(word) for word in word_list if len(anagram(word)) > 1}
    with open("anagrams.txt", "w") as file:
        file.write("all_anagrams = \n ")
        file.write(pprint.pformat(all_anagrams))
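# Tiny worked example (added): two words are anagrams exactly when their
# sorted-letter signatures match, which is what the defaultdict above groups by.
assert signature("listen") == signature("silent") == "eilnst"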
| 287 | 0 |
'''simple docstring'''
_lowerCamelCase : str = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"
def __lowerCamelCase ( A__ ) -> bytes:
"""simple docstring"""
# Make sure the supplied data is a bytes-like object
if not isinstance(A__ , A__ ):
UpperCamelCase = F"""a bytes-like object is required, not '{data.__class__.__name__}'"""
raise TypeError(A__ )
UpperCamelCase = ''.join(bin(A__ )[2:].zfill(8 ) for byte in data )
UpperCamelCase = len(A__ ) % 6 != 0
if padding_needed:
# The padding that will be added later
UpperCamelCase = B'=' * ((6 - len(A__ ) % 6) // 2)
# Append binary_stream with arbitrary binary digits (0's by default) to make its
# length a multiple of 6.
binary_stream += "0" * (6 - len(A__ ) % 6)
else:
UpperCamelCase = B''
# Encode every 6 binary digits to their corresponding Base64 character
return (
"".join(
B64_CHARSET[int(binary_stream[index : index + 6] , 2 )]
for index in range(0 , len(A__ ) , 6 ) ).encode()
+ padding
)
def __lowerCamelCase ( A__ ) -> bytes:
"""simple docstring"""
# Make sure encoded_data is either a string or a bytes-like object
if not isinstance(A__ , A__ ) and not isinstance(A__ , A__ ):
UpperCamelCase = (
'argument should be a bytes-like object or ASCII string, '
F"""not '{encoded_data.__class__.__name__}'"""
)
raise TypeError(A__ )
# In case encoded_data is a bytes-like object, make sure it contains only
# ASCII characters so we convert it to a string object
if isinstance(A__ , A__ ):
try:
UpperCamelCase = encoded_data.decode('utf-8' )
except UnicodeDecodeError:
raise ValueError('base64 encoded data should only contain ASCII characters' )
UpperCamelCase = encoded_data.count('=' )
# Check if the encoded string contains non base64 characters
if padding:
assert all(
char in B64_CHARSET for char in encoded_data[:-padding] ), "Invalid base64 character(s) found."
else:
assert all(
char in B64_CHARSET for char in encoded_data ), "Invalid base64 character(s) found."
# Check the padding
assert len(A__ ) % 4 == 0 and padding < 3, "Incorrect padding"
if padding:
# Remove padding if there is one
UpperCamelCase = encoded_data[:-padding]
UpperCamelCase = ''.join(
bin(B64_CHARSET.index(A__ ) )[2:].zfill(6 ) for char in encoded_data )[: -padding * 2]
else:
UpperCamelCase = ''.join(
bin(B64_CHARSET.index(A__ ) )[2:].zfill(6 ) for char in encoded_data )
UpperCamelCase = [
int(binary_stream[index : index + 8] , 2 )
for index in range(0 , len(A__ ) , 8 )
]
return bytes(A__ )
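# Round-trip sanity checks (added for illustration; these mirror the standard
# library's base64 module, which the two functions above re-implement):
import base64 as _stdlib_base64

assert base64_encode(b"Hello World!") == _stdlib_base64.b64encode(b"Hello World!")
assert base64_decode(base64_encode(b"\x00\xff binary ok")) == b"\x00\xff binary ok"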
if __name__ == "__main__":
import doctest
doctest.testmod()
| 28 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
_lowerCamelCase =logging.get_logger(__name__)
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : Union[str, Any] = ["""pixel_values"""]
def __init__( self , __magic_name__ = True , __magic_name__ = None , __magic_name__ = None , __magic_name__ = PILImageResampling.BILINEAR , __magic_name__ = True , __magic_name__ = 1 / 2_5_5 , __magic_name__ = True , __magic_name__ = None , __magic_name__ = None , **__magic_name__ , ):
super().__init__(**__magic_name__ )
lowerCamelCase : Dict = size if size is not None else {"""shortest_edge""": 3_8_4}
lowerCamelCase : Tuple = get_size_dict(__magic_name__ , default_to_square=__magic_name__ )
lowerCamelCase : Dict = do_resize
lowerCamelCase : List[Any] = size
# Default value set here for backwards compatibility where the value in config is None
lowerCamelCase : Any = crop_pct if crop_pct is not None else 2_2_4 / 2_5_6
lowerCamelCase : Union[str, Any] = resample
lowerCamelCase : str = do_rescale
lowerCamelCase : Union[str, Any] = rescale_factor
lowerCamelCase : Tuple = do_normalize
lowerCamelCase : Optional[Any] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
lowerCamelCase : int = image_std if image_std is not None else IMAGENET_STANDARD_STD
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ = PILImageResampling.BICUBIC , __magic_name__ = None , **__magic_name__ , ):
lowerCamelCase : Union[str, Any] = get_size_dict(__magic_name__ , default_to_square=__magic_name__ )
if "shortest_edge" not in size:
raise ValueError(F'''Size dictionary must contain \'shortest_edge\' key. Got {size.keys()}''' )
lowerCamelCase : str = size["""shortest_edge"""]
if shortest_edge < 3_8_4:
# maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
lowerCamelCase : List[str] = int(shortest_edge / crop_pct )
lowerCamelCase : Optional[Any] = get_resize_output_image_size(__magic_name__ , size=__magic_name__ , default_to_square=__magic_name__ )
lowerCamelCase : Optional[int] = resize(image=__magic_name__ , size=__magic_name__ , resample=__magic_name__ , data_format=__magic_name__ , **__magic_name__ )
# then crop to (shortest_edge, shortest_edge)
return center_crop(image=__magic_name__ , size=(shortest_edge, shortest_edge) , data_format=__magic_name__ , **__magic_name__ )
else:
# warping (no cropping) when evaluated at 384 or larger
return resize(
__magic_name__ , size=(shortest_edge, shortest_edge) , resample=__magic_name__ , data_format=__magic_name__ , **__magic_name__ )
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ = None , **__magic_name__ , ):
return rescale(__magic_name__ , scale=__magic_name__ , data_format=__magic_name__ , **__magic_name__ )
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ = None , **__magic_name__ , ):
return normalize(__magic_name__ , mean=__magic_name__ , std=__magic_name__ , data_format=__magic_name__ , **__magic_name__ )
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ = None , __magic_name__ = None , __magic_name__ = None , __magic_name__ = None , __magic_name__ = None , __magic_name__ = None , __magic_name__ = None , __magic_name__ = None , __magic_name__ = None , __magic_name__ = None , __magic_name__ = ChannelDimension.FIRST , **__magic_name__ , ):
lowerCamelCase : str = do_resize if do_resize is not None else self.do_resize
lowerCamelCase : Optional[Any] = crop_pct if crop_pct is not None else self.crop_pct
lowerCamelCase : Optional[int] = resample if resample is not None else self.resample
lowerCamelCase : Optional[int] = do_rescale if do_rescale is not None else self.do_rescale
lowerCamelCase : List[str] = rescale_factor if rescale_factor is not None else self.rescale_factor
lowerCamelCase : Tuple = do_normalize if do_normalize is not None else self.do_normalize
lowerCamelCase : Optional[Any] = image_mean if image_mean is not None else self.image_mean
lowerCamelCase : Tuple = image_std if image_std is not None else self.image_std
lowerCamelCase : Dict = size if size is not None else self.size
lowerCamelCase : str = get_size_dict(__magic_name__ , default_to_square=__magic_name__ )
lowerCamelCase : List[str] = make_list_of_images(__magic_name__ )
if not valid_images(__magic_name__ ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None or resample is None:
raise ValueError("""Size and resample must be specified if do_resize is True.""" )
if do_resize and size["shortest_edge"] < 3_8_4 and crop_pct is None:
raise ValueError("""crop_pct must be specified if size < 384.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# All transformations expect numpy arrays.
lowerCamelCase : Optional[Any] = [to_numpy_array(__magic_name__ ) for image in images]
if do_resize:
lowerCamelCase : List[Any] = [self.resize(image=__magic_name__ , size=__magic_name__ , crop_pct=__magic_name__ , resample=__magic_name__ ) for image in images]
if do_rescale:
lowerCamelCase : Union[str, Any] = [self.rescale(image=__magic_name__ , scale=__magic_name__ ) for image in images]
if do_normalize:
lowerCamelCase : List[Any] = [self.normalize(image=__magic_name__ , mean=__magic_name__ , std=__magic_name__ ) for image in images]
lowerCamelCase : Optional[int] = [to_channel_dimension_format(__magic_name__ , __magic_name__ ) for image in images]
lowerCamelCase : List[str] = {"""pixel_values""": images}
return BatchFeature(data=__magic_name__ , tensor_type=__magic_name__ )
| 287 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
'uclanlp/visualbert-vqa': 'https://huggingface.co/uclanlp/visualbert-vqa/resolve/main/config.json',
'uclanlp/visualbert-vqa-pre': 'https://huggingface.co/uclanlp/visualbert-vqa-pre/resolve/main/config.json',
'uclanlp/visualbert-vqa-coco-pre': (
'https://huggingface.co/uclanlp/visualbert-vqa-coco-pre/resolve/main/config.json'
),
'uclanlp/visualbert-vcr': 'https://huggingface.co/uclanlp/visualbert-vcr/resolve/main/config.json',
'uclanlp/visualbert-vcr-pre': 'https://huggingface.co/uclanlp/visualbert-vcr-pre/resolve/main/config.json',
'uclanlp/visualbert-vcr-coco-pre': (
'https://huggingface.co/uclanlp/visualbert-vcr-coco-pre/resolve/main/config.json'
),
'uclanlp/visualbert-nlvr2': 'https://huggingface.co/uclanlp/visualbert-nlvr2/resolve/main/config.json',
'uclanlp/visualbert-nlvr2-pre': 'https://huggingface.co/uclanlp/visualbert-nlvr2-pre/resolve/main/config.json',
'uclanlp/visualbert-nlvr2-coco-pre': (
'https://huggingface.co/uclanlp/visualbert-nlvr2-coco-pre/resolve/main/config.json'
)
# See all VisualBERT models at https://huggingface.co/models?filter=visual_bert
}
class VisualBertConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = "visual_bert"

    def __init__(self, vocab_size=30_522, hidden_size=768, visual_embedding_dim=512, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3_072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, bypass_transformer=False, special_visual_initialize=True, pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.visual_embedding_dim = visual_embedding_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.bypass_transformer = bypass_transformer
        self.special_visual_initialize = special_visual_initialize
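# Minimal instantiation check (added; uses only the defaults restored above,
# nothing is downloaded from the Hub):
_config = VisualBertConfig()
assert _config.visual_embedding_dim == 512 and _config.hidden_size == 768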
| 29 |
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPTaConfig
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
_lowerCamelCase ={
"""return_dict""": False,
"""output_hidden_states""": True,
"""output_attentions""": True,
"""torchscript""": True,
"""torch_dtype""": """float16""",
"""use_bfloat16""": True,
"""tf_legacy_loss""": True,
"""pruned_heads""": {"""a""": 1},
"""tie_word_embeddings""": False,
"""is_decoder""": True,
"""cross_attention_hidden_size""": 1_2_8,
"""add_cross_attention""": True,
"""tie_encoder_decoder""": True,
"""max_length""": 5_0,
"""min_length""": 3,
"""do_sample""": True,
"""early_stopping""": True,
"""num_beams""": 3,
"""num_beam_groups""": 3,
"""diversity_penalty""": 0.5,
"""temperature""": 2.0,
"""top_k""": 1_0,
"""top_p""": 0.7,
"""typical_p""": 0.2,
"""repetition_penalty""": 0.8,
"""length_penalty""": 0.8,
"""no_repeat_ngram_size""": 5,
"""encoder_no_repeat_ngram_size""": 5,
"""bad_words_ids""": [1, 2, 3],
"""num_return_sequences""": 3,
"""chunk_size_feed_forward""": 5,
"""output_scores""": True,
"""return_dict_in_generate""": True,
"""forced_bos_token_id""": 2,
"""forced_eos_token_id""": 3,
"""remove_invalid_values""": True,
"""architectures""": ["""BertModel"""],
"""finetuning_task""": """translation""",
"""id2label""": {0: """label"""},
"""label2id""": {"""label""": """0"""},
"""tokenizer_class""": """BertTokenizerFast""",
"""prefix""": """prefix""",
"""bos_token_id""": 6,
"""pad_token_id""": 7,
"""eos_token_id""": 8,
"""sep_token_id""": 9,
"""decoder_start_token_id""": 1_0,
"""exponential_decay_length_penalty""": (5, 1.01),
"""suppress_tokens""": [0, 1],
"""begin_suppress_tokens""": 2,
"""task_specific_params""": {"""translation""": """some_params"""},
"""problem_type""": """regression""",
}
@is_staging_test
class A__ ( unittest.TestCase):
@classmethod
def UpperCamelCase__ ( cls ):
lowerCamelCase : int = TOKEN
HfFolder.save_token(__magic_name__ )
@classmethod
def UpperCamelCase__ ( cls ):
try:
delete_repo(token=cls._token , repo_id="""test-config""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""valid_org/test-config-org""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""test-dynamic-config""" )
except HTTPError:
pass
def UpperCamelCase__ ( self ):
lowerCamelCase : List[Any] = BertConfig(
vocab_size=9_9 , hidden_size=3_2 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=3_7 )
config.push_to_hub("""test-config""" , use_auth_token=self._token )
lowerCamelCase : Any = BertConfig.from_pretrained(F'''{USER}/test-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__magic_name__ , getattr(__magic_name__ , __magic_name__ ) )
# Reset repo
delete_repo(token=self._token , repo_id="""test-config""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(__magic_name__ , repo_id="""test-config""" , push_to_hub=__magic_name__ , use_auth_token=self._token )
lowerCamelCase : Optional[Any] = BertConfig.from_pretrained(F'''{USER}/test-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__magic_name__ , getattr(__magic_name__ , __magic_name__ ) )
def UpperCamelCase__ ( self ):
lowerCamelCase : Dict = BertConfig(
vocab_size=9_9 , hidden_size=3_2 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=3_7 )
config.push_to_hub("""valid_org/test-config-org""" , use_auth_token=self._token )
lowerCamelCase : Optional[int] = BertConfig.from_pretrained("""valid_org/test-config-org""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__magic_name__ , getattr(__magic_name__ , __magic_name__ ) )
# Reset repo
delete_repo(token=self._token , repo_id="""valid_org/test-config-org""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
__magic_name__ , repo_id="""valid_org/test-config-org""" , push_to_hub=__magic_name__ , use_auth_token=self._token )
lowerCamelCase : List[str] = BertConfig.from_pretrained("""valid_org/test-config-org""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__magic_name__ , getattr(__magic_name__ , __magic_name__ ) )
def UpperCamelCase__ ( self ):
CustomConfig.register_for_auto_class()
lowerCamelCase : Optional[Any] = CustomConfig(attribute=4_2 )
config.push_to_hub("""test-dynamic-config""" , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(config.auto_map , {"""AutoConfig""": """custom_configuration.CustomConfig"""} )
lowerCamelCase : List[str] = AutoConfig.from_pretrained(F'''{USER}/test-dynamic-config''' , trust_remote_code=__magic_name__ )
# Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
self.assertEqual(new_config.__class__.__name__ , """CustomConfig""" )
self.assertEqual(new_config.attribute , 4_2 )
class A__ ( unittest.TestCase):
def UpperCamelCase__ ( self ):
lowerCamelCase : str = GPTaConfig()
# attempt to modify each of int/float/bool/str config records and verify they were updated
lowerCamelCase : Optional[int] = c.n_embd + 1 # int
lowerCamelCase : Optional[int] = c.resid_pdrop + 1.0 # float
lowerCamelCase : Tuple = not c.scale_attn_weights # bool
lowerCamelCase : Any = c.summary_type + """foo""" # str
c.update_from_string(
F'''n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}''' )
self.assertEqual(__magic_name__ , c.n_embd , """mismatch for key: n_embd""" )
self.assertEqual(__magic_name__ , c.resid_pdrop , """mismatch for key: resid_pdrop""" )
self.assertEqual(__magic_name__ , c.scale_attn_weights , """mismatch for key: scale_attn_weights""" )
self.assertEqual(__magic_name__ , c.summary_type , """mismatch for key: summary_type""" )
def UpperCamelCase__ ( self ):
lowerCamelCase : str = PretrainedConfig()
lowerCamelCase : int = [key for key in base_config.__dict__ if key not in config_common_kwargs]
# If this part of the test fails, you have arguments to addin config_common_kwargs above.
self.assertListEqual(
__magic_name__ , ["""is_encoder_decoder""", """_name_or_path""", """_commit_hash""", """transformers_version"""] )
lowerCamelCase : List[str] = [key for key, value in config_common_kwargs.items() if value == getattr(__magic_name__ , __magic_name__ )]
if len(__magic_name__ ) > 0:
raise ValueError(
"""The following keys are set with the default values in"""
""" `test_configuration_common.config_common_kwargs` pick another value for them:"""
F''' {", ".join(__magic_name__ )}.''' )
def UpperCamelCase__ ( self ):
with self.assertRaises(__magic_name__ ):
# config is in subfolder, the following should not work without specifying the subfolder
lowerCamelCase : Dict = BertConfig.from_pretrained("""hf-internal-testing/tiny-random-bert-subfolder""" )
lowerCamelCase : str = BertConfig.from_pretrained("""hf-internal-testing/tiny-random-bert-subfolder""" , subfolder="""bert""" )
self.assertIsNotNone(__magic_name__ )
def UpperCamelCase__ ( self ):
# A mock response for an HTTP head request to emulate server down
lowerCamelCase : Dict = mock.Mock()
lowerCamelCase : Optional[int] = 5_0_0
lowerCamelCase : List[Any] = {}
lowerCamelCase : Tuple = HTTPError
lowerCamelCase : Union[str, Any] = {}
# Download this model to make sure it's in the cache.
lowerCamelCase : List[str] = BertConfig.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch("""requests.Session.request""" , return_value=__magic_name__ ) as mock_head:
lowerCamelCase : Any = BertConfig.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
# This check we did call the fake head request
mock_head.assert_called()
def UpperCamelCase__ ( self ):
# This test is for deprecated behavior and can be removed in v5
lowerCamelCase : List[str] = BertConfig.from_pretrained(
"""https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json""" )
def UpperCamelCase__ ( self ):
lowerCamelCase : Optional[int] = AutoConfig.from_pretrained("""bert-base-cased""" )
lowerCamelCase : Optional[Any] = ["""config.4.0.0.json"""]
with tempfile.TemporaryDirectory() as tmp_dir:
configuration.save_pretrained(__magic_name__ )
lowerCamelCase : str = 2
json.dump(configuration.to_dict() , open(os.path.join(__magic_name__ , """config.4.0.0.json""" ) , """w""" ) )
# This should pick the new configuration file as the version of Transformers is > 4.0.0
lowerCamelCase : Union[str, Any] = AutoConfig.from_pretrained(__magic_name__ )
self.assertEqual(new_configuration.hidden_size , 2 )
# Will need to be adjusted if we reach v42 and this test is still here.
# Should pick the old configuration file as the version of Transformers is < 4.42.0
lowerCamelCase : Any = ["""config.42.0.0.json"""]
lowerCamelCase : Optional[Any] = 7_6_8
configuration.save_pretrained(__magic_name__ )
shutil.move(os.path.join(__magic_name__ , """config.4.0.0.json""" ) , os.path.join(__magic_name__ , """config.42.0.0.json""" ) )
lowerCamelCase : int = AutoConfig.from_pretrained(__magic_name__ )
self.assertEqual(new_configuration.hidden_size , 7_6_8 )
def UpperCamelCase__ ( self ):
# This repo has two configuration files, one for v4.0.0 and above with a different hidden size.
lowerCamelCase : str = """hf-internal-testing/test-two-configs"""
import transformers as new_transformers
lowerCamelCase : Tuple = """v4.0.0"""
lowerCamelCase , lowerCamelCase : Optional[int] = new_transformers.models.auto.AutoConfig.from_pretrained(
__magic_name__ , return_unused_kwargs=__magic_name__ )
self.assertEqual(new_configuration.hidden_size , 2 )
# This checks `_configuration_file` ia not kept in the kwargs by mistake.
self.assertDictEqual(__magic_name__ , {} )
# Testing an older version by monkey-patching the version in the module it's used.
import transformers as old_transformers
lowerCamelCase : Tuple = """v3.0.0"""
lowerCamelCase : Any = old_transformers.models.auto.AutoConfig.from_pretrained(__magic_name__ )
self.assertEqual(old_configuration.hidden_size , 7_6_8 )
| 287 | 0 |
import csv
from collections import defaultdict
from dataclasses import dataclass, field
from typing import List, Optional
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.ticker import ScalarFormatter
from transformers import HfArgumentParser
def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)
@dataclass
class PlotArguments:
    """simple docstring"""

    csv_file: str = field(
        metadata={"help": "The csv file to plot."},
    )
    plot_along_batch: bool = field(
        default=False,
        metadata={"help": "Whether to plot along batch size or sequence length. Defaults to sequence length."},
    )
    is_time: bool = field(
        default=False,
        metadata={"help": "Whether the csv file has time results or memory results. Defaults to memory results."},
    )
    no_log_scale: bool = field(
        default=False,
        metadata={"help": "Disable logarithmic scale when plotting"},
    )
    is_train: bool = field(
        default=False,
        metadata={
            "help": "Whether the csv file has training results or inference results. Defaults to inference results."
        },
    )
    figure_png_file: Optional[str] = field(
        default=None,
        metadata={"help": "Filename under which the plot will be saved. If unused no plot is saved."},
    )
    short_model_names: Optional[List[str]] = list_field(
        default=None, metadata={"help": "List of model names that are used instead of the ones in the csv file."}
    )
def a ( snake_case__: Dict ):
'''simple docstring'''
try:
int(snake_case__ )
return True
except ValueError:
return False
def a ( snake_case__: Optional[int] ):
'''simple docstring'''
try:
float(snake_case__ )
return True
except ValueError:
return False
class lowercase__:
"""simple docstring"""
def __init__( self : List[Any] , SCREAMING_SNAKE_CASE_ : List[str] ) -> Any:
lowercase_ = args
lowercase_ = defaultdict(lambda: {"bsz": [], "seq_len": [], "result": {}} )
with open(self.args.csv_file , newline='''''' ) as csv_file:
lowercase_ = csv.DictReader(SCREAMING_SNAKE_CASE_ )
for row in reader:
lowercase_ = row['''model''']
self.result_dict[model_name]["bsz"].append(int(row['''batch_size'''] ) )
self.result_dict[model_name]["seq_len"].append(int(row['''sequence_length'''] ) )
if can_convert_to_int(row['''result'''] ):
# value is not None
lowercase_ = int(row['''result'''] )
elif can_convert_to_float(row['''result'''] ):
# value is not None
lowercase_ = float(row['''result'''] )
    def plot(self ):
        fig, ax = plt.subplots()
        title_str = 'Time usage' if self.args.is_time else 'Memory usage'
        title_str = title_str + ' for training' if self.args.is_train else title_str + ' for inference'
        if not self.args.no_log_scale:
            # set logarithm scales
            ax.set_xscale('log' )
            ax.set_yscale('log' )
        for axis in [ax.xaxis, ax.yaxis]:
            axis.set_major_formatter(ScalarFormatter() )
        for model_name_idx, model_name in enumerate(self.result_dict.keys() ):
            batch_sizes = sorted(set(self.result_dict[model_name]['bsz'] ) )
            sequence_lengths = sorted(set(self.result_dict[model_name]['seq_len'] ) )
            results = self.result_dict[model_name]['result']
            (x_axis_array, inner_loop_array) = (
                (batch_sizes, sequence_lengths) if self.args.plot_along_batch else (sequence_lengths, batch_sizes)
            )
            label_model_name = (
                model_name if self.args.short_model_names is None else self.args.short_model_names[model_name_idx]
            )
            for inner_loop_value in inner_loop_array:
                if self.args.plot_along_batch:
                    y_axis_array = np.asarray(
                        [results[(x, inner_loop_value)] for x in x_axis_array if (x, inner_loop_value) in results] , dtype=int , )
                else:
                    y_axis_array = np.asarray(
                        [results[(inner_loop_value, x)] for x in x_axis_array if (inner_loop_value, x) in results] , dtype=np.float32 , )
                (x_axis_label, inner_loop_label) = (
                    ('batch_size', 'len') if self.args.plot_along_batch else ('in #tokens', 'bsz')
                )
                x_axis_array = np.asarray(x_axis_array , int )[: len(y_axis_array )]
                plt.scatter(
                    x_axis_array , y_axis_array , label=f'''{label_model_name} - {inner_loop_label}: {inner_loop_value}''' )
                plt.plot(x_axis_array , y_axis_array , '--' )
            title_str += f''' {label_model_name} vs.'''
        title_str = title_str[:-4]
        y_axis_label = 'Time in s' if self.args.is_time else 'Memory in MB'
        # plot
        plt.title(title_str )
        plt.xlabel(x_axis_label )
        plt.ylabel(y_axis_label )
        plt.legend()
        if self.args.figure_png_file is not None:
            plt.savefig(self.args.figure_png_file )
        else:
            plt.show()
def main():
    '''simple docstring'''
    parser = HfArgumentParser(PlotArguments )
    args = parser.parse_args_into_dataclasses()[0]
    plot = Plot(args=args )
    plot.plot()
if __name__ == "__main__":
main()
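# Input CSV sketch (columns consumed by Plot.__init__ above; values are illustrative):
# model,batch_size,sequence_length,result
# bert-base-uncased,8,128,0.012
# bert-base-uncased,8,512,0.044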
| 30 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class A__ ( unittest.TestCase):
@slow
def UpperCamelCase__ ( self ):
        model = TFCamembertModel.from_pretrained("""jplu/tf-camembert-base""" )
        input_ids = tf.convert_to_tensor(
            [[5, 1_2_1, 1_1, 6_6_0, 1_6, 7_3_0, 2_5_5_4_3, 1_1_0, 8_3, 6]] , dtype=tf.int32 , )  # J'aime le camembert !
        output = model(input_ids )["""last_hidden_state"""]
        expected_shape = tf.TensorShape((1, 1_0, 7_6_8) )
        self.assertEqual(output.shape , expected_shape )
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [[[-0.0_254, 0.0_235, 0.1_027], [0.0_606, -0.1_811, -0.0_418], [-0.1_561, -0.1_127, 0.2_687]]] , dtype=tf.float32 , )
        # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
        # camembert.eval()
        # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
        self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
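        # Sketch (assumption, not taken from the test): the hard-coded ids above should
        # be reproducible with the matching sentencepiece tokenizer, roughly:
        #   tok = CamembertTokenizer.from_pretrained("camembert-base")  # hypothetical checkpoint
        #   ids = tok.encode("J'aime le camembert !")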
| 287 | 0 |
'''simple docstring'''
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
torch.set_grad_enabled(False)
def create_rename_keys(config , base_model=False ) -> Any:
    """simple docstring"""
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""module.blocks.{i}.norm1.weight""", F"""vit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""module.blocks.{i}.norm1.bias""", F"""vit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append(
(F"""module.blocks.{i}.attn.proj.weight""", F"""vit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((F"""module.blocks.{i}.attn.proj.bias""", F"""vit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((F"""module.blocks.{i}.norm2.weight""", F"""vit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""module.blocks.{i}.norm2.bias""", F"""vit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((F"""module.blocks.{i}.mlp.fc1.weight""", F"""vit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((F"""module.blocks.{i}.mlp.fc1.bias""", F"""vit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((F"""module.blocks.{i}.mlp.fc2.weight""", F"""vit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""module.blocks.{i}.mlp.fc2.bias""", F"""vit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
("module.cls_token", "vit.embeddings.cls_token"),
("module.patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
("module.patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
("module.pos_embed", "vit.embeddings.position_embeddings"),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("module.norm.weight", "layernorm.weight"),
("module.norm.bias", "layernorm.bias"),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("norm.weight", "vit.layernorm.weight"),
("norm.bias", "vit.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
return rename_keys
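# For example, the mapping built above sends the checkpoint key
# "module.blocks.0.norm1.weight" to "vit.encoder.layer.0.layernorm_before.weight";
# with base_model=True the "vit." prefix is then stripped again.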
def read_in_q_k_v(state_dict , config , base_model=False ) -> Optional[Any]:
    """simple docstring"""
    for i in range(config.num_hidden_layers ):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F"""module.blocks.{i}.attn.qkv.weight""" )
        in_proj_bias = state_dict.pop(F"""module.blocks.{i}.attn.qkv.bias""" )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.query.weight"""] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.query.bias"""] = in_proj_bias[: config.hidden_size]
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.key.weight"""] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.key.bias"""] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.value.weight"""] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.value.bias"""] = in_proj_bias[-config.hidden_size :]
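# Note: each fused qkv weight popped above has shape (3 * hidden_size, hidden_size)
# (and the bias 3 * hidden_size), so the three equal slices are the query, key and
# value projections, in that order.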
def remove_classification_head_(state_dict ) -> Optional[int]:
    """simple docstring"""
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k , None )
def remove_projection_head(state_dict ) -> Optional[Any]:
    """simple docstring"""
    ignore_keys = [
"module.fc.fc1.weight",
"module.fc.fc1.bias",
"module.fc.bn1.weight",
"module.fc.bn1.bias",
"module.fc.bn1.running_mean",
"module.fc.bn1.running_var",
"module.fc.bn1.num_batches_tracked",
"module.fc.fc2.weight",
"module.fc.fc2.bias",
"module.fc.bn2.weight",
"module.fc.bn2.bias",
"module.fc.bn2.running_mean",
"module.fc.bn2.running_var",
"module.fc.bn2.num_batches_tracked",
"module.fc.fc3.weight",
"module.fc.fc3.bias",
]
    for k in ignore_keys:
        state_dict.pop(k , None )
def rename_key(dct , old , new ) -> List[str]:
    """simple docstring"""
    val = dct.pop(old )
    dct[new] = val
def convert_vit_msn_checkpoint(checkpoint_url , pytorch_dump_folder_path ) -> str:
    """simple docstring"""
    config = ViTMSNConfig()
    config.num_labels = 1_000
    repo_id = "datasets/huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id , filename ) , "r" ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    if "s16" in checkpoint_url:
        config.hidden_size = 384
        config.intermediate_size = 1_536
        config.num_attention_heads = 6
    elif "l16" in checkpoint_url:
        config.hidden_size = 1_024
        config.intermediate_size = 4_096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1
    elif "b4" in checkpoint_url:
        config.patch_size = 4
    elif "l7" in checkpoint_url:
        config.patch_size = 7
        config.hidden_size = 1_024
        config.intermediate_size = 4_096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1
    model = ViTMSNModel(config )
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location="cpu" )["target_encoder"]
    image_processor = ViTImageProcessor(size=config.image_size )
    remove_projection_head(state_dict )
    rename_keys = create_rename_keys(config , base_model=True )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    read_in_q_k_v(state_dict , config , base_model=True )
    model.load_state_dict(state_dict )
    model.eval()
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url , stream=True ).raw )
    image_processor = ViTImageProcessor(
        size=config.image_size , image_mean=IMAGENET_DEFAULT_MEAN , image_std=IMAGENET_DEFAULT_STD )
    inputs = image_processor(images=image , return_tensors="pt" )
    # forward pass
    torch.manual_seed(2 )
    outputs = model(**inputs )
    last_hidden_state = outputs.last_hidden_state
    # The following Colab Notebook was used to generate these outputs:
    # https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
    if "s16" in checkpoint_url:
        expected_slice = torch.tensor([[-1.0_9_1_5, -1.4_8_7_6, -1.1_8_0_9]] )
    elif "b16" in checkpoint_url:
        expected_slice = torch.tensor([[1_4.2_8_8_9, -1_8.9_0_4_5, 1_1.7_2_8_1]] )
    elif "l16" in checkpoint_url:
        expected_slice = torch.tensor([[4_1.5_0_2_8, -2_2.8_6_8_1, 4_5.6_4_7_5]] )
    elif "b4" in checkpoint_url:
        expected_slice = torch.tensor([[-4.3_8_6_8, 5.2_9_3_2, -0.4_1_3_7]] )
    else:
        expected_slice = torch.tensor([[-0.1_7_9_2, -0.6_4_6_5, 2.4_2_6_3]] )
    # verify logits
    assert torch.allclose(last_hidden_state[:, 0, :3] , expected_slice , atol=1e-4 )
    print(F"""Saving model to {pytorch_dump_folder_path}""" )
    model.save_pretrained(pytorch_dump_folder_path )
    print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
    image_processor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint_url""",
default="""https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar""",
type=str,
help="""URL of the checkpoint you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
    args = parser.parse_args()
convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 31 |
import bz2
import gzip
import lzma
import os
import shutil
import struct
import tarfile
import warnings
import zipfile
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict, List, Optional, Type, Union
from .. import config
from .filelock import FileLock
from .logging import get_logger
logger = get_logger(__name__)
class ExtractManager:
    def __init__(self , cache_dir = None ):
        self.extract_dir = (
            os.path.join(cache_dir , config.EXTRACTED_DATASETS_DIR ) if cache_dir else config.EXTRACTED_DATASETS_PATH
        )
        self.extractor = Extractor

    def _get_output_path(self , path ):
        from .file_utils import hash_url_to_filename

        # Path where we extract compressed archives
        # We extract in the cache dir, and get the extracted path name by hashing the original path
        abs_path = os.path.abspath(path )
        return os.path.join(self.extract_dir , hash_url_to_filename(abs_path ) )

    def _do_extract(self , output_path , force_extract ):
        return force_extract or (
            not os.path.isfile(output_path ) and not (os.path.isdir(output_path ) and os.listdir(output_path ))
        )

    def extract(self , input_path , force_extract = False ):
        extractor_format = self.extractor.infer_extractor_format(input_path )
        if not extractor_format:
            return input_path
        output_path = self._get_output_path(input_path )
        if self._do_extract(output_path , force_extract ):
            self.extractor.extract(input_path , output_path , extractor_format )
        return output_path
class BaseExtractor(ABC ):
    @classmethod
    @abstractmethod
    def is_extractable(cls , path , **kwargs ):
        ...

    @staticmethod
    @abstractmethod
    def extract(input_path , output_path ):
        ...
class MagicNumberBaseExtractor(BaseExtractor , ABC ):
    magic_numbers: List[bytes] = []

    @staticmethod
    def read_magic_number(path , magic_number_length ):
        with open(path , "rb" ) as f:
            return f.read(magic_number_length )

    @classmethod
    def is_extractable(cls , path , magic_number = b"" ):
        if not magic_number:
            magic_number_length = max(len(cls_magic_number ) for cls_magic_number in cls.magic_numbers )
            try:
                magic_number = cls.read_magic_number(path , magic_number_length )
            except OSError:
                return False
        return any(magic_number.startswith(cls_magic_number ) for cls_magic_number in cls.magic_numbers )
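def _sketch_magic_number_demo(tmp_path="demo.gz" ):
    """Illustrative sketch, not part of the original module: write a gzip file and
    check that its leading bytes equal the gzip magic number (0x1F 0x8B) that
    GzipExtractor below relies on."""
    with gzip.open(tmp_path , "wb" ) as f:
        f.write(b"hello" )
    with open(tmp_path , "rb" ) as f:
        magic = f.read(2 )
    assert magic == b"\x1f\x8b"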
class TarExtractor(BaseExtractor ):
    @classmethod
    def is_extractable(cls , path , **kwargs ):
        return tarfile.is_tarfile(path )

    @staticmethod
    def safemembers(members , output_path ):
        def resolved(path ) -> str:
            return os.path.realpath(os.path.abspath(path ) )

        def badpath(path , base ) -> bool:
            # joinpath will ignore base if path is absolute
            return not resolved(os.path.join(base , path ) ).startswith(base )

        def badlink(info , base ) -> bool:
            # Links are interpreted relative to the directory containing the link
            tip = resolved(os.path.join(base , os.path.dirname(info.name ) ) )
            return badpath(info.linkname , base=tip )

        base = resolved(output_path )

        for finfo in members:
            if badpath(finfo.name , base ):
                logger.error(F'''Extraction of {finfo.name} is blocked (illegal path)''' )
            elif finfo.issym() and badlink(finfo , base ):
                logger.error(F'''Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}''' )
            elif finfo.islnk() and badlink(finfo , base ):
                logger.error(F'''Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}''' )
            else:
                yield finfo

    @staticmethod
    def extract(input_path , output_path ):
        os.makedirs(output_path , exist_ok=True )
        tar_file = tarfile.open(input_path )
        tar_file.extractall(output_path , members=TarExtractor.safemembers(tar_file , output_path ) )
        tar_file.close()

class GzipExtractor(MagicNumberBaseExtractor ):
    magic_numbers = [b"\x1F\x8B"]

    @staticmethod
    def extract(input_path , output_path ):
        with gzip.open(input_path , "rb" ) as gzip_file:
            with open(output_path , "wb" ) as extracted_file:
                shutil.copyfileobj(gzip_file , extracted_file )
class ZipExtractor(MagicNumberBaseExtractor ):
    magic_numbers = [
        b"PK\x03\x04",
        b"PK\x05\x06",  # empty archive
        b"PK\x07\x08",  # spanned archive
    ]

    @classmethod
    def is_extractable(cls , path , magic_number = b"" ):
        if super().is_extractable(path , magic_number=magic_number ):
            return True
        try:
            # Alternative version of zipfile.is_zipfile that has less false positives, but misses executable zip archives.
            # From: https://github.com/python/cpython/pull/5053
            from zipfile import (
                _CD_SIGNATURE,
                _ECD_DISK_NUMBER,
                _ECD_DISK_START,
                _ECD_ENTRIES_TOTAL,
                _ECD_OFFSET,
                _ECD_SIZE,
                _EndRecData,
                sizeCentralDir,
                stringCentralDir,
                structCentralDir,
            )

            with open(path , "rb" ) as fp:
                endrec = _EndRecData(fp )
                if endrec:
                    if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0:
                        return True  # Empty zipfiles are still zipfiles
                    elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]:
                        fp.seek(endrec[_ECD_OFFSET] )  # Central directory is on the same disk
                        if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir:
                            data = fp.read(sizeCentralDir )  # CD is where we expect it to be
                            if len(data ) == sizeCentralDir:
                                centdir = struct.unpack(structCentralDir , data )  # CD is the right size
                                if centdir[_CD_SIGNATURE] == stringCentralDir:
                                    return True  # First central directory entry has correct magic number
            return False
        except Exception:  # catch all errors in case future python versions change the zipfile internals
            return False

    @staticmethod
    def extract(input_path , output_path ):
        os.makedirs(output_path , exist_ok=True )
        with zipfile.ZipFile(input_path , "r" ) as zip_file:
            zip_file.extractall(output_path )
            zip_file.close()
class XzExtractor(MagicNumberBaseExtractor ):
    magic_numbers = [b"\xFD\x37\x7A\x58\x5A\x00"]

    @staticmethod
    def extract(input_path , output_path ):
        with lzma.open(input_path ) as compressed_file:
            with open(output_path , "wb" ) as extracted_file:
                shutil.copyfileobj(compressed_file , extracted_file )

class RarExtractor(MagicNumberBaseExtractor ):
    magic_numbers = [b"Rar!\x1a\x07\x00", b"Rar!\x1a\x07\x01\x00"]  # RAR_ID  # RAR5_ID

    @staticmethod
    def extract(input_path , output_path ):
        if not config.RARFILE_AVAILABLE:
            raise ImportError("""Please pip install rarfile""" )
        import rarfile

        os.makedirs(output_path , exist_ok=True )
        rf = rarfile.RarFile(input_path )
        rf.extractall(output_path )
        rf.close()

class ZstdExtractor(MagicNumberBaseExtractor ):
    magic_numbers = [b"\x28\xb5\x2F\xFD"]

    @staticmethod
    def extract(input_path , output_path ):
        if not config.ZSTANDARD_AVAILABLE:
            raise ImportError("""Please pip install zstandard""" )
        import zstandard as zstd

        dctx = zstd.ZstdDecompressor()
        with open(input_path , "rb" ) as ifh, open(output_path , "wb" ) as ofh:
            dctx.copy_stream(ifh , ofh )

class Bzip2Extractor(MagicNumberBaseExtractor ):
    magic_numbers = [b"\x42\x5A\x68"]

    @staticmethod
    def extract(input_path , output_path ):
        with bz2.open(input_path , "rb" ) as compressed_file:
            with open(output_path , "wb" ) as extracted_file:
                shutil.copyfileobj(compressed_file , extracted_file )

class SevenZipExtractor(MagicNumberBaseExtractor ):
    magic_numbers = [b"\x37\x7A\xBC\xAF\x27\x1C"]

    @staticmethod
    def extract(input_path , output_path ):
        if not config.PY7ZR_AVAILABLE:
            raise ImportError("""Please pip install py7zr""" )
        import py7zr

        os.makedirs(output_path , exist_ok=True )
        with py7zr.SevenZipFile(input_path , "r" ) as archive:
            archive.extractall(output_path )

class Lz4Extractor(MagicNumberBaseExtractor ):
    magic_numbers = [b"\x04\x22\x4D\x18"]

    @staticmethod
    def extract(input_path , output_path ):
        if not config.LZ4_AVAILABLE:
            raise ImportError("""Please pip install lz4""" )
        import lz4.frame

        with lz4.frame.open(input_path , "rb" ) as compressed_file:
            with open(output_path , "wb" ) as extracted_file:
                shutil.copyfileobj(compressed_file , extracted_file )
class Extractor:
    #  Put zip file to the last, b/c it is possible wrongly detected as zip (I guess it means: as tar or gzip)
    extractors: Dict[str, Type[BaseExtractor]] = {
        "tar": TarExtractor,
        "gzip": GzipExtractor,
        "zip": ZipExtractor,
        "xz": XzExtractor,
        "rar": RarExtractor,
        "zstd": ZstdExtractor,
        "bz2": Bzip2Extractor,
        "7z": SevenZipExtractor,  # <Added version="2.4.0"/>
        "lz4": Lz4Extractor,  # <Added version="2.4.0"/>
    }

    @classmethod
    def _get_magic_number_max_length(cls ):
        return max(
            len(extractor_magic_number )
            for extractor in cls.extractors.values()
            if issubclass(extractor , MagicNumberBaseExtractor )
            for extractor_magic_number in extractor.magic_numbers )

    @staticmethod
    def _read_magic_number(path , magic_number_length ):
        try:
            return MagicNumberBaseExtractor.read_magic_number(path , magic_number_length=magic_number_length )
        except OSError:
            return b""

    @classmethod
    def is_extractable(cls , path , return_extractor = False ):
        warnings.warn(
            """Method 'is_extractable' was deprecated in version 2.4.0 and will be removed in 3.0.0. """
            """Use 'infer_extractor_format' instead.""" , category=FutureWarning , )
        extractor_format = cls.infer_extractor_format(path )
        if extractor_format:
            return True if not return_extractor else (True, cls.extractors[extractor_format])
        return False if not return_extractor else (False, None)

    @classmethod
    def infer_extractor_format(cls , path ):  # <Added version="2.4.0"/>
        magic_number_max_length = cls._get_magic_number_max_length()
        magic_number = cls._read_magic_number(path , magic_number_max_length )
        for extractor_format, extractor in cls.extractors.items():
            if extractor.is_extractable(path , magic_number=magic_number ):
                return extractor_format

    @classmethod
    def extract(cls , input_path , output_path , extractor_format = None , extractor = "deprecated" , ):
        os.makedirs(os.path.dirname(output_path ) , exist_ok=True )
        # Prevent parallel extractions
        lock_path = str(Path(output_path ).with_suffix(""".lock""" ) )
        with FileLock(lock_path ):
            shutil.rmtree(output_path , ignore_errors=True )
            if extractor_format or extractor != "deprecated":
                if extractor != "deprecated" or not isinstance(extractor_format , str ):  # passed as positional arg
                    warnings.warn(
                        """Parameter 'extractor' was deprecated in version 2.4.0 and will be removed in 3.0.0. """
                        """Use 'extractor_format' instead.""" , category=FutureWarning , )
                    extractor = extractor if extractor != """deprecated""" else extractor_format
                else:
                    extractor = cls.extractors[extractor_format]
                return extractor.extract(input_path , output_path )
            else:
                warnings.warn(
                    """Parameter 'extractor_format' was made required in version 2.4.0 and not passing it will raise an """
                    """exception in 3.0.0.""" , category=FutureWarning , )
                for extractor in cls.extractors.values():
                    if extractor.is_extractable(input_path ):
                        return extractor.extract(input_path , output_path )
| 287 | 0 |
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path
import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
from transformers import AutoTokenizer, HfArgumentParser
PATTERN = re.compile(R'\s+')

def get_hash(example ):
    """simple docstring"""
    return {"hash": hashlib.md5(re.sub(PATTERN , '' , example['content'] ).encode('utf-8' ) ).hexdigest()}
def line_stats(example ):
    """simple docstring"""
    line_lengths = [len(line ) for line in example['content'].splitlines()]
    return {"line_mean": np.mean(line_lengths ), "line_max": max(line_lengths )}
def alpha_stats(example ):
    """simple docstring"""
    alpha_frac = np.mean([c.isalnum() for c in example['content']] )
    return {"alpha_frac": alpha_frac}
def check_uniques(example , uniques ):
    """simple docstring"""
    if example["hash"] in uniques:
        uniques.remove(example['hash'] )
        return True
    else:
        return False
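def _sketch_check_uniques_demo():
    """Illustrative sketch, not part of the original script: the first occurrence of
    a hash passes and is removed from the set, so any later duplicate fails."""
    uniques = {"h1"}
    assert check_uniques({"hash": "h1"} , uniques ) is True
    assert check_uniques({"hash": "h1"} , uniques ) is False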
def is_autogenerated(example , scan_width=5 ):
    """simple docstring"""
    keywords = ['auto-generated', 'autogenerated', 'automatically generated']
    lines = example['content'].splitlines()
    for _, line in zip(range(scan_width ) , lines ):
        for keyword in keywords:
            if keyword in line.lower():
                return {"autogenerated": True}
    else:
        return {"autogenerated": False}
def is_config_or_test(example , scan_width=5 , coeff=0.05 ):
    """simple docstring"""
    keywords = ['unit tests', 'test file', 'configuration file']
    lines = example['content'].splitlines()
    count_config = 0
    count_test = 0
    # first test
    for _, line in zip(range(scan_width ) , lines ):
        for keyword in keywords:
            if keyword in line.lower():
                return {"config_or_test": True}
    # second test
    nlines = example['content'].count('\n' )
    threshold = int(coeff * nlines )
    for line in lines:
        count_config += line.lower().count('config' )
        count_test += line.lower().count('test' )
        if count_config > threshold or count_test > threshold:
            return {"config_or_test": True}
    return {"config_or_test": False}
def has_no_keywords(example ):
    """simple docstring"""
    keywords = ['def ', 'class ', 'for ', 'while ']
    lines = example['content'].splitlines()
    for line in lines:
        for keyword in keywords:
            if keyword in line.lower():
                return {"has_no_keywords": False}
    return {"has_no_keywords": True}
def has_few_assignments(example , minimum=4 ):
    """simple docstring"""
    lines = example['content'].splitlines()
    counter = 0
    for line in lines:
        counter += line.lower().count('=' )
        if counter > minimum:
            return {"has_few_assignments": False}
    return {"has_few_assignments": True}
def char_token_ratio(example ):
    """simple docstring"""
    input_ids = tokenizer(example['content'] , truncation=False )['input_ids']
    ratio = len(example['content'] ) / len(input_ids )
    return {"ratio": ratio}
def preprocess(example ):
    """simple docstring"""
    results = {}
    results.update(get_hash(example ) )
    results.update(line_stats(example ) )
    results.update(alpha_stats(example ) )
    results.update(char_token_ratio(example ) )
    results.update(is_autogenerated(example ) )
    results.update(is_config_or_test(example ) )
    results.update(has_no_keywords(example ) )
    results.update(has_few_assignments(example ) )
    return results
def filter(example , uniques , args ):
    """simple docstring"""
    if not check_uniques(example , uniques ):
        return False
elif example["autogenerated"]:
return False
elif example["line_max"] > args.line_max:
return False
elif example["line_mean"] > args.line_mean:
return False
elif example["alpha_frac"] < args.alpha_frac:
return False
elif example["ratio"] < args.min_token_ratio:
return False
elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_few_assignments"]:
return False
else:
return True
def compress_file(file_path ):
    """simple docstring"""
    with open(file_path , 'rb' ) as f_in:
        with gzip.open(str(file_path ) + '.gz' , 'wb' , compresslevel=6 ) as f_out:
            shutil.copyfileobj(f_in , f_out )
    os.unlink(file_path )
# Settings
parser = HfArgumentParser(PreprocessingArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)

# Load dataset
t_start = time.time()
ds = load_dataset(args.dataset_name, split='train')
print(F'Time to load dataset: {time.time()-t_start:.2f}')

# Run preprocessing
t_start = time.time()
ds = ds.map(preprocess, num_proc=args.num_workers)
print(F'Time to preprocess dataset: {time.time()-t_start:.2f}')

# Deduplicate hashes
uniques = set(ds.unique('hash'))
frac = len(uniques) / len(ds)
print(F'Fraction of duplicates: {1-frac:.2%}')

# Deduplicate data and apply heuristics
t_start = time.time()
ds_filter = ds.filter(filter, fn_kwargs={'uniques': uniques, 'args': args})
print(F'Time to filter dataset: {time.time()-t_start:.2f}')
print(F'Size of filtered dataset: {len(ds_filter)}')

# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
    t_start = time.time()
    ds_filter, duplicate_clusters = deduplicate_dataset(ds_filter, args.jaccard_threshold)
    print(F'Time to deduplicate dataset: {time.time()-t_start:.2f}')
    print(F'Size of deduplicate dataset: {len(ds_filter)}')

# Save data in batches of samples_per_file
output_dir = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)

# save duplicate_clusters in the output_dir as artifacts
# not sure it is the right place the save it
if args.near_deduplication:
    with open(output_dir / 'duplicate_clusters.json', 'w') as f:
        json.dump(duplicate_clusters, f)

data_dir = output_dir / 'data'
data_dir.mkdir(exist_ok=True)

t_start = time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
    file_path = str(data_dir / F'file-{file_number+1:012}.json')
    end_index = min(len(ds_filter), index + args.samples_per_file)
    ds_filter.select(list(range(index, end_index))).to_json(file_path)
    compress_file(file_path)
print(F'Time to save dataset: {time.time()-t_start:.2f}')
| 32 |
import json
import os
import tempfile
import transformers
import datasets
from utils import generate_example_dataset, get_duration
SPEED_TEST_N_EXAMPLES = 5_0_0_0_0_0
RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, """results""", RESULTS_FILENAME.replace(""".py""", """.json"""))

@get_duration
def map(dataset, **kwargs ):
    _ = dataset.map(**kwargs )

@get_duration
def filter(dataset, **kwargs ):
    _ = dataset.filter(**kwargs )

def benchmark_map_filter():
    times = {"""num examples""": SPEED_TEST_N_EXAMPLES}
    with tempfile.TemporaryDirectory() as tmp_dir:
        features = datasets.Features({"""text""": datasets.Value("""string""" ), """numbers""": datasets.Value("""float32""" )} )
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, """dataset.arrow""" ), features, num_examples=SPEED_TEST_N_EXAMPLES )

        tokenizer = transformers.AutoTokenizer.from_pretrained("""bert-base-cased""", use_fast=True )

        def tokenize(examples ):
            return tokenizer(examples["""text"""] )

        times["map identity"] = map(dataset )
        times["map identity batched"] = map(dataset, batched=True )
        times["map no-op batched"] = map(dataset, function=lambda x : None, batched=True )

        with dataset.formatted_as(type="""numpy""" ):
            times["map no-op batched numpy"] = map(dataset, function=lambda x : None, batched=True )
        with dataset.formatted_as(type="""pandas""" ):
            times["map no-op batched pandas"] = map(dataset, function=lambda x : None, batched=True )
        with dataset.formatted_as(type="""torch""", columns="""numbers""" ):
            times["map no-op batched pytorch"] = map(dataset, function=lambda x : None, batched=True )
        with dataset.formatted_as(type="""tensorflow""", columns="""numbers""" ):
            times["map no-op batched tensorflow"] = map(dataset, function=lambda x : None, batched=True )

        times["map fast-tokenizer batched"] = map(dataset, function=tokenize, batched=True )

        times["filter"] = filter(dataset )
        # Activate later when tokenizer support batched inputs
        # with dataset.formatted_as(type='numpy'):
        #     times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)

    with open(RESULTS_FILE_PATH, """wb""" ) as f:
        f.write(json.dumps(times ).encode("""utf-8""" ) )
if __name__ == "__main__": # useful to run the profiler
benchmark_map_filter()
| 287 | 0 |
"""simple docstring"""
import argparse
import json
import torch
from diffusers import DDPMScheduler, LDMPipeline, UNet2DModel, VQModel
def shave_segments(path , n_shave_prefix_segments=1 ):
if n_shave_prefix_segments >= 0:
return ".".join(path.split('''.''' )[n_shave_prefix_segments:] )
else:
return ".".join(path.split('''.''' )[:n_shave_prefix_segments] )
def renew_resnet_paths(old_list , n_shave_prefix_segments=0 ):
    mapping = []
    for old_item in old_list:
        new_item = old_item.replace('''in_layers.0''' , '''norm1''' )
        new_item = new_item.replace('''in_layers.2''' , '''conv1''' )
        new_item = new_item.replace('''out_layers.0''' , '''norm2''' )
        new_item = new_item.replace('''out_layers.3''' , '''conv2''' )
        new_item = new_item.replace('''emb_layers.1''' , '''time_emb_proj''' )
        new_item = new_item.replace('''skip_connection''' , '''conv_shortcut''' )
        new_item = shave_segments(new_item , n_shave_prefix_segments=n_shave_prefix_segments )
        mapping.append({'''old''': old_item, '''new''': new_item} )
    return mapping
def renew_attention_paths(old_list , n_shave_prefix_segments=0 ):
    mapping = []
    for old_item in old_list:
        new_item = old_item
        new_item = new_item.replace('''norm.weight''' , '''group_norm.weight''' )
        new_item = new_item.replace('''norm.bias''' , '''group_norm.bias''' )
        new_item = new_item.replace('''proj_out.weight''' , '''proj_attn.weight''' )
        new_item = new_item.replace('''proj_out.bias''' , '''proj_attn.bias''' )
        new_item = shave_segments(new_item , n_shave_prefix_segments=n_shave_prefix_segments )
        mapping.append({'''old''': old_item, '''new''': new_item} )
    return mapping
def assign_to_checkpoint(paths , checkpoint , old_checkpoint , attention_paths_to_split=None , additional_replacements=None , config=None ):
    assert isinstance(paths , list ), "Paths should be a list of dicts containing 'old' and 'new' keys."
    # Splits the attention layers into three variables.
    if attention_paths_to_split is not None:
        for path, path_map in attention_paths_to_split.items():
            old_tensor = old_checkpoint[path]
            channels = old_tensor.shape[0] // 3
            target_shape = (-1, channels) if len(old_tensor.shape ) == 3 else (-1)
            num_heads = old_tensor.shape[0] // config['''num_head_channels'''] // 3
            old_tensor = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:] )
            query, key, value = old_tensor.split(channels // num_heads , dim=1 )
            checkpoint[path_map['''query''']] = query.reshape(target_shape )
            checkpoint[path_map['''key''']] = key.reshape(target_shape )
            checkpoint[path_map['''value''']] = value.reshape(target_shape )
    for path in paths:
        new_path = path['''new''']
        # These have already been assigned
        if attention_paths_to_split is not None and new_path in attention_paths_to_split:
            continue
        # Global renaming happens here
        new_path = new_path.replace('''middle_block.0''' , '''mid_block.resnets.0''' )
        new_path = new_path.replace('''middle_block.1''' , '''mid_block.attentions.0''' )
        new_path = new_path.replace('''middle_block.2''' , '''mid_block.resnets.1''' )
        if additional_replacements is not None:
            for replacement in additional_replacements:
                new_path = new_path.replace(replacement['''old'''] , replacement['''new'''] )
        # proj_attn.weight has to be converted from conv 1D to linear
        if "proj_attn.weight" in new_path:
            checkpoint[new_path] = old_checkpoint[path['''old''']][:, :, 0]
        else:
            checkpoint[new_path] = old_checkpoint[path['''old''']]
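# Note: the qkv split above assumes the fused tensor stacks query, key and value
# along dim 0, so each projection owns old_tensor.shape[0] // 3 channels before the
# per-head reshape.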
def convert_ldm_checkpoint(checkpoint , config ):
    new_checkpoint = {}
    new_checkpoint['''time_embedding.linear_1.weight'''] = checkpoint['''time_embed.0.weight''']
    new_checkpoint['''time_embedding.linear_1.bias'''] = checkpoint['''time_embed.0.bias''']
    new_checkpoint['''time_embedding.linear_2.weight'''] = checkpoint['''time_embed.2.weight''']
    new_checkpoint['''time_embedding.linear_2.bias'''] = checkpoint['''time_embed.2.bias''']
    new_checkpoint['''conv_in.weight'''] = checkpoint['''input_blocks.0.0.weight''']
    new_checkpoint['''conv_in.bias'''] = checkpoint['''input_blocks.0.0.bias''']
    new_checkpoint['''conv_norm_out.weight'''] = checkpoint['''out.0.weight''']
    new_checkpoint['''conv_norm_out.bias'''] = checkpoint['''out.0.bias''']
    new_checkpoint['''conv_out.weight'''] = checkpoint['''out.2.weight''']
    new_checkpoint['''conv_out.bias'''] = checkpoint['''out.2.bias''']
    # Retrieves the keys for the input blocks only
    num_input_blocks = len({'''.'''.join(layer.split('''.''' )[:2] ) for layer in checkpoint if '''input_blocks''' in layer} )
    input_blocks = {
        layer_id: [key for key in checkpoint if F'''input_blocks.{layer_id}''' in key]
        for layer_id in range(num_input_blocks )
    }
    # Retrieves the keys for the middle blocks only
    num_middle_blocks = len({'''.'''.join(layer.split('''.''' )[:2] ) for layer in checkpoint if '''middle_block''' in layer} )
    middle_blocks = {
        layer_id: [key for key in checkpoint if F'''middle_block.{layer_id}''' in key]
        for layer_id in range(num_middle_blocks )
    }
    # Retrieves the keys for the output blocks only
    num_output_blocks = len({'''.'''.join(layer.split('''.''' )[:2] ) for layer in checkpoint if '''output_blocks''' in layer} )
    output_blocks = {
        layer_id: [key for key in checkpoint if F'''output_blocks.{layer_id}''' in key]
        for layer_id in range(num_output_blocks )
    }
    for i in range(1 , num_input_blocks ):
        block_id = (i - 1) // (config['''num_res_blocks'''] + 1)
        layer_in_block_id = (i - 1) % (config['''num_res_blocks'''] + 1)
        resnets = [key for key in input_blocks[i] if F'''input_blocks.{i}.0''' in key]
        attentions = [key for key in input_blocks[i] if F'''input_blocks.{i}.1''' in key]
        if F'''input_blocks.{i}.0.op.weight''' in checkpoint:
            new_checkpoint[F'''down_blocks.{block_id}.downsamplers.0.conv.weight'''] = checkpoint[
                F'''input_blocks.{i}.0.op.weight'''
            ]
            new_checkpoint[F'''down_blocks.{block_id}.downsamplers.0.conv.bias'''] = checkpoint[
                F'''input_blocks.{i}.0.op.bias'''
            ]
            continue
        paths = renew_resnet_paths(resnets )
        meta_path = {'''old''': F'''input_blocks.{i}.0''', '''new''': F'''down_blocks.{block_id}.resnets.{layer_in_block_id}'''}
        resnet_op = {'''old''': '''resnets.2.op''', '''new''': '''downsamplers.0.op'''}
        assign_to_checkpoint(
            paths , new_checkpoint , checkpoint , additional_replacements=[meta_path, resnet_op] , config=config )
        if len(attentions ):
            paths = renew_attention_paths(attentions )
            meta_path = {
                '''old''': F'''input_blocks.{i}.1''',
                '''new''': F'''down_blocks.{block_id}.attentions.{layer_in_block_id}''',
            }
            to_split = {
                F'''input_blocks.{i}.1.qkv.bias''': {
                    '''key''': F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias''',
                    '''query''': F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias''',
                    '''value''': F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias''',
                },
                F'''input_blocks.{i}.1.qkv.weight''': {
                    '''key''': F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight''',
                    '''query''': F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight''',
                    '''value''': F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight''',
                },
            }
            assign_to_checkpoint(
                paths , new_checkpoint , checkpoint , additional_replacements=[meta_path] , attention_paths_to_split=to_split , config=config , )
    resnet_0 = middle_blocks[0]
    attentions = middle_blocks[1]
    resnet_1 = middle_blocks[2]
    resnet_0_paths = renew_resnet_paths(resnet_0 )
    assign_to_checkpoint(resnet_0_paths , new_checkpoint , checkpoint , config=config )
    resnet_1_paths = renew_resnet_paths(resnet_1 )
    assign_to_checkpoint(resnet_1_paths , new_checkpoint , checkpoint , config=config )
    attentions_paths = renew_attention_paths(attentions )
    to_split = {
        '''middle_block.1.qkv.bias''': {
            '''key''': '''mid_block.attentions.0.key.bias''',
            '''query''': '''mid_block.attentions.0.query.bias''',
            '''value''': '''mid_block.attentions.0.value.bias''',
        },
        '''middle_block.1.qkv.weight''': {
            '''key''': '''mid_block.attentions.0.key.weight''',
            '''query''': '''mid_block.attentions.0.query.weight''',
            '''value''': '''mid_block.attentions.0.value.weight''',
        },
    }
    assign_to_checkpoint(
        attentions_paths , new_checkpoint , checkpoint , attention_paths_to_split=to_split , config=config )
    for i in range(num_output_blocks ):
        block_id = i // (config['''num_res_blocks'''] + 1)
        layer_in_block_id = i % (config['''num_res_blocks'''] + 1)
        output_block_layers = [shave_segments(name , 2 ) for name in output_blocks[i]]
        output_block_list = {}
        for layer in output_block_layers:
            layer_id, layer_name = layer.split('''.''' )[0], shave_segments(layer , 1 )
            if layer_id in output_block_list:
                output_block_list[layer_id].append(layer_name )
            else:
                output_block_list[layer_id] = [layer_name]
        if len(output_block_list ) > 1:
            resnets = [key for key in output_blocks[i] if F'''output_blocks.{i}.0''' in key]
            attentions = [key for key in output_blocks[i] if F'''output_blocks.{i}.1''' in key]
            resnet_0_paths = renew_resnet_paths(resnets )
            paths = renew_resnet_paths(resnets )
            meta_path = {'''old''': F'''output_blocks.{i}.0''', '''new''': F'''up_blocks.{block_id}.resnets.{layer_in_block_id}'''}
            assign_to_checkpoint(paths , new_checkpoint , checkpoint , additional_replacements=[meta_path] , config=config )
            if ["conv.weight", "conv.bias"] in output_block_list.values():
                index = list(output_block_list.values() ).index(['''conv.weight''', '''conv.bias'''] )
                new_checkpoint[F'''up_blocks.{block_id}.upsamplers.0.conv.weight'''] = checkpoint[
                    F'''output_blocks.{i}.{index}.conv.weight'''
                ]
                new_checkpoint[F'''up_blocks.{block_id}.upsamplers.0.conv.bias'''] = checkpoint[
                    F'''output_blocks.{i}.{index}.conv.bias'''
                ]
                # Clear attentions as they have been attributed above.
                if len(attentions ) == 2:
                    attentions = []
            if len(attentions ):
                paths = renew_attention_paths(attentions )
                meta_path = {
                    '''old''': F'''output_blocks.{i}.1''',
                    '''new''': F'''up_blocks.{block_id}.attentions.{layer_in_block_id}''',
                }
                to_split = {
                    F'''output_blocks.{i}.1.qkv.bias''': {
                        '''key''': F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias''',
                        '''query''': F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias''',
                        '''value''': F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias''',
                    },
                    F'''output_blocks.{i}.1.qkv.weight''': {
                        '''key''': F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight''',
                        '''query''': F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight''',
                        '''value''': F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight''',
                    },
                }
                assign_to_checkpoint(
                    paths , new_checkpoint , checkpoint , additional_replacements=[meta_path] , attention_paths_to_split=to_split if any('''qkv''' in key for key in attentions ) else None , config=config , )
        else:
            resnet_0_paths = renew_resnet_paths(output_block_layers , n_shave_prefix_segments=1 )
            for path in resnet_0_paths:
                old_path = '''.'''.join(['''output_blocks''', str(i ), path['''old''']] )
                new_path = '''.'''.join(['''up_blocks''', str(block_id ), '''resnets''', str(layer_in_block_id ), path['''new''']] )
                new_checkpoint[new_path] = checkpoint[old_path]
    return new_checkpoint
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the architecture.''',
)
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
    args = parser.parse_args()
    checkpoint = torch.load(args.checkpoint_path)
    with open(args.config_file) as f:
        config = json.loads(f.read())
    converted_checkpoint = convert_ldm_checkpoint(checkpoint, config)
    if "ldm" in config:
        del config["ldm"]
    model = UNet2DModel(**config)
    model.load_state_dict(converted_checkpoint)
    try:
        scheduler = DDPMScheduler.from_config('''/'''.join(args.checkpoint_path.split('''/''')[:-1]))
        vqvae = VQModel.from_pretrained('''/'''.join(args.checkpoint_path.split('''/''')[:-1]))
        pipe = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae)
pipe.save_pretrained(args.dump_path)
except: # noqa: E722
model.save_pretrained(args.dump_path)
| 33 |
def lucas_lehmer_test(p ):
    if p < 2:
        raise ValueError("""p should not be less than 2!""" )
    elif p == 2:
        return True
    s = 4
    m = (1 << p) - 1
    for _ in range(p - 2 ):
        s = ((s * s) - 2) % m
    return s == 0
if __name__ == "__main__":
print(lucas_lehmer_test(7))
print(lucas_lehmer_test(1_1))
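# Math note: M_p = 2**p - 1 is prime iff s_(p-2) == 0 (mod M_p), with s_0 = 4 and
# s_(k+1) = s_k**2 - 2. For p = 7, M_7 = 127 is prime, so the first call prints True;
# for p = 11, M_11 = 2047 = 23 * 89 is composite, so the second call prints False.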
| 287 | 0 |
'''simple docstring'''
def multiplication_table (number : int , number_of_terms : int ):
return "\n".join(
F"{number} * {i} = {number * i}" for i in range(1 , number_of_terms + 1 ) )
if __name__ == "__main__":
print(multiplication_table(number=5, number_of_terms=10))
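# Expected output of the call above (first and last lines):
# 5 * 1 = 5
# 5 * 2 = 10
# ...
# 5 * 10 = 50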
| 34 |
import warnings
from ...utils import logging
from .image_processing_owlvit import OwlViTImageProcessor
logger = logging.get_logger(__name__)
class OwlViTFeatureExtractor(OwlViTImageProcessor ):
    def __init__(self , *args , **kwargs ):
        warnings.warn(
            """The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
            """ use OwlViTImageProcessor instead.""" , FutureWarning , )
        super().__init__(*args , **kwargs )
| 287 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
__a = {"configuration_plbart": ["PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP", "PLBartConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = ["PLBartTokenizer"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_plbart"] = [
        "PLBART_PRETRAINED_MODEL_ARCHIVE_LIST",
        "PLBartForCausalLM",
        "PLBartForConditionalGeneration",
        "PLBartForSequenceClassification",
        "PLBartModel",
        "PLBartPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_plbart import PLBartTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_plbart import (
PLBART_PRETRAINED_MODEL_ARCHIVE_LIST,
PLBartForCausalLM,
PLBartForConditionalGeneration,
PLBartForSequenceClassification,
PLBartModel,
PLBartPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 35 |
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SEGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""nvidia/segformer-b0-finetuned-ade-512-512""": (
"""https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json"""
),
# See all SegFormer models at https://huggingface.co/models?filter=segformer
}
class SegformerConfig(PretrainedConfig ):
    model_type = """segformer"""

    def __init__(
        self ,
        num_channels=3 ,
        num_encoder_blocks=4 ,
        depths=[2, 2, 2, 2] ,
        sr_ratios=[8, 4, 2, 1] ,
        hidden_sizes=[3_2, 6_4, 1_6_0, 2_5_6] ,
        patch_sizes=[7, 3, 3, 3] ,
        strides=[4, 2, 2, 2] ,
        num_attention_heads=[1, 2, 5, 8] ,
        mlp_ratios=[4, 4, 4, 4] ,
        hidden_act="gelu" ,
        hidden_dropout_prob=0.0 ,
        attention_probs_dropout_prob=0.0 ,
        classifier_dropout_prob=0.1 ,
        initializer_range=0.02 ,
        drop_path_rate=0.1 ,
        layer_norm_eps=1e-6 ,
        decoder_hidden_size=2_5_6 ,
        semantic_loss_ignore_index=2_5_5 ,
        **kwargs ,
    ):
        super().__init__(**kwargs )
        if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
            warnings.warn(
                """Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be"""
                """ removed, as the behaviour will default to that of reshape_last_stage = True.""" , FutureWarning , )
        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.reshape_last_stage = kwargs.get("""reshape_last_stage""" , True )
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class SegformerOnnxConfig(OnnxConfig ):
    torch_onnx_minimum_version = version.parse("""1.11""" )

    @property
    def inputs(self ) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
            ] )

    @property
    def atol_for_validation(self ) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self ) -> int:
        return 1_2
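# Minimal usage sketch (illustrative, not part of the original module):
#   config = SegformerConfig()
#   onnx_config = SegformerOnnxConfig(config)
#   assert list(onnx_config.inputs) == ["pixel_values"]
#   assert onnx_config.default_onnx_opset == 12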
| 287 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {
    "configuration_longt5": ["LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP", "LongT5Config", "LongT5OnnxConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_longt5"] = [
        "LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LongT5EncoderModel",
        "LongT5ForConditionalGeneration",
        "LongT5Model",
        "LongT5PreTrainedModel",
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_longt5"] = [
        "FlaxLongT5ForConditionalGeneration",
        "FlaxLongT5Model",
        "FlaxLongT5PreTrainedModel",
    ]
if TYPE_CHECKING:
    from .configuration_longt5 import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongT5Config, LongT5OnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_longt5 import (
            LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            LongT5EncoderModel,
            LongT5ForConditionalGeneration,
            LongT5Model,
            LongT5PreTrainedModel,
        )
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_flax_longt5 import (
            FlaxLongT5ForConditionalGeneration,
            FlaxLongT5Model,
            FlaxLongT5PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 36 |
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging
logger = logging.get_logger(__name__)

GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""EleutherAI/gpt-neo-1.3B""": """https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json""",
# See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}
class GPTNeoConfig(PretrainedConfig ):
    model_type = """gpt_neo"""
    keys_to_ignore_at_inference = ["""past_key_values"""]
    attribute_map = {"""num_attention_heads""": """num_heads""", """num_hidden_layers""": """num_layers"""}

    def __init__(
        self ,
        vocab_size=5_0_2_5_7 ,
        max_position_embeddings=2_0_4_8 ,
        hidden_size=2_0_4_8 ,
        num_layers=2_4 ,
        attention_types=[[["global", "local"], 1_2]] ,
        num_heads=1_6 ,
        intermediate_size=None ,
        window_size=2_5_6 ,
        activation_function="gelu_new" ,
        resid_dropout=0.0 ,
        embed_dropout=0.0 ,
        attention_dropout=0.0 ,
        classifier_dropout=0.1 ,
        layer_norm_epsilon=1e-5 ,
        initializer_range=0.02 ,
        use_cache=True ,
        bos_token_id=5_0_2_5_6 ,
        eos_token_id=5_0_2_5_6 ,
        **kwargs ,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.intermediate_size = intermediate_size
        self.window_size = window_size
        self.activation_function = activation_function
        self.resid_dropout = resid_dropout
        self.embed_dropout = embed_dropout
        self.attention_dropout = attention_dropout
        self.classifier_dropout = classifier_dropout
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.attention_types = attention_types
        self.attention_layers = self.expand_attention_types_params(attention_types )
        if len(self.attention_layers ) != self.num_layers:
            raise ValueError(
                """Configuration for convolutional module is incorrect. """
                """It is required that `len(config.attention_layers)` == `config.num_layers` """
                F'''but is `len(config.attention_layers) = {len(self.attention_layers )}`, '''
                F'''`config.num_layers = {self.num_layers}`. '''
                """`config.attention_layers` is prepared using `config.attention_types`. """
                """Please verify the value of `config.attention_types` argument.""" )
        super().__init__(bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
@staticmethod
def UpperCamelCase__ ( __magic_name__ ):
        attentions = []
for item in attention_types:
for _ in range(item[1] ):
attentions.extend(item[0] )
return attentions
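    # e.g. expand_attention_types_params([[["global", "local"], 2]])
    # returns ["global", "local", "global", "local"] -- one entry per layer.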
def custom_unfold(input, dimension, size, step ):
    """Custom torch.Tensor.unfold implementation to enable the export to ONNX."""
    import torch

    shape = input.size()
    rank = len(shape )
    sizedim = shape[dimension]
    low_indices = torch.arange(0, sizedim, step )
    min_length = torch.div(sizedim - size, step, rounding_mode="""floor""" ) + 1
    indices = torch.arange(size ) + low_indices[:min_length][:, None]
    s = [slice(None )] * rank
    s[dimension] = indices
    sliced = input[s]
    perm = list(range(0, rank + 1 ) )
    perm.append(perm.pop(dimension + 1 ) )
    return sliced.permute(perm )
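def _sketch_unfold_check():
    """Illustrative check, not part of the original module: custom_unfold should
    agree with torch.Tensor.unfold on a small example."""
    import torch

    x = torch.arange(10 ).reshape(1, 10 )
    assert torch.equal(custom_unfold(x, 1, 4, 2 ), x.unfold(1, 4, 2 ) )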
def custom_get_block_length_and_num_windows(seq_length, window_size):
    """Custom implementation to find the largest divisor of the sequence length
    that is at most `window_size`, written with tensor ops so it is ONNX-exportable."""
    import torch

    candidates = torch.arange(1, window_size)
    remainders = torch.remainder(seq_length, candidates)
    divisor_indices = remainders == 0
    divisors = candidates[divisor_indices]
    largest_divisor = torch.max(divisors)
    return largest_divisor, torch.div(seq_length, largest_divisor, rounding_mode="floor")
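
# Illustration: `custom_unfold` mirrors `torch.Tensor.unfold` (which is not
# ONNX-exportable); for example, both calls below yield a (1, 4, 4) tensor of
# four windows of size 4 with stride 2:
#   x = torch.arange(10).view(1, 10)
#   torch.equal(custom_unfold(x, 1, 4, 2), x.unfold(1, 4, 2))  # True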
class GPTNeoOnnxConfig(OnnxConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}

        return common_inputs
    @property
    def num_attention_heads(self) -> int:
        return self._config.num_heads
    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs
    @property
    def default_onnx_opset(self) -> int:
        return 13
| 287 | 0 |
import json
import os
from typing import Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"ctrl": "https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-vocab.json"},
    "merges_file": {"ctrl": "https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-merges.txt"},
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "ctrl": 256,
}

CONTROL_CODES = {
    "Pregnancy": 168629,
    "Christianity": 7675,
    "Explain": 106423,
    "Fitness": 63440,
    "Saving": 63163,
    "Ask": 27171,
    "Ass": 95985,
    "Joke": 163509,
    "Questions": 45622,
    "Thoughts": 49605,
    "Retail": 52342,
    "Feminism": 164338,
    "Writing": 11992,
    "Atheism": 192263,
    "Netflix": 48616,
    "Computing": 39639,
    "Opinion": 43213,
    "Alone": 44967,
    "Funny": 58917,
    "Gaming": 40358,
    "Human": 4088,
    "India": 1331,
    "Joker": 77138,
    "Diet": 36206,
    "Legal": 11859,
    "Norman": 4939,
    "Tip": 72689,
    "Weight": 52343,
    "Movies": 46273,
    "Running": 23425,
    "Science": 2090,
    "Horror": 37793,
    "Confession": 60572,
    "Finance": 12250,
    "Politics": 16360,
    "Scary": 191985,
    "Support": 12654,
    "Technologies": 32516,
    "Teenage": 66160,
    "Event": 32769,
    "Learned": 67460,
    "Notion": 182770,
    "Wikipedia": 37583,
    "Books": 6665,
    "Extract": 76050,
    "Confessions": 102701,
    "Conspiracy": 75932,
    "Links": 63674,
    "Narcissus": 150425,
    "Relationship": 54766,
    "Relationships": 134796,
    "Reviews": 41671,
    "News": 4256,
    "Translation": 26820,
    "multilingual": 128406,
}
def get_pairs(word):
    """Return set of symbol pairs in a word.

    Word is represented as tuple of symbols (symbols being variable-length strings).
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char

    pairs = set(pairs)
    return pairs
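
# Illustration:
#   get_pairs(("h", "e", "l", "l", "o"))
#   -> {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}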
class CTRLTokenizer(PreTrainedTokenizer):
    """Construct a CTRL tokenizer, based on Byte-Pair-Encoding."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    control_codes = CONTROL_CODES

    def __init__(self, vocab_file, merges_file, unk_token="<unk>", **kwargs):
        super().__init__(unk_token=unk_token, **kwargs)

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            merges = merges_handle.read().split("\n")[1:-1]
        merges = [tuple(merge.split()) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}
    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = "@@ ".join(word)
        word = word[:-4]
        self.cache[token] = word
        return word
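
    # Illustration: `bpe` returns the merged subwords joined by "@@ " with the
    # word-final "</w>" marker stripped, e.g. a token split into
    # ("hel", "lo</w>") comes back as "hel@@ lo".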
    def _tokenize(self, text):
        """Tokenize a string."""
        split_tokens = []

        words = re.findall(r"\S+\n?", text)

        for token in words:
            split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens
    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) in a single string."""
        out_string = " ".join(tokens).replace("@@ ", "").strip()
        return out_string
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file
# def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True):
# filtered_tokens = ' '.join(self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens))
# tokens_generated_so_far = re.sub('(@@ )', '', string=filtered_tokens)
# tokens_generated_so_far = re.sub('(@@ ?$)', '', string=tokens_generated_so_far)
# return ''.join(tokens_generated_so_far)
| 37 |
from . import __version__
# Backward compatibility imports, to make sure all those objects can be found in file_utils
from .utils import (
CLOUDFRONT_DISTRIB_PREFIX,
CONFIG_NAME,
DISABLE_TELEMETRY,
DUMMY_INPUTS,
DUMMY_MASK,
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
FEATURE_EXTRACTOR_NAME,
FLAX_WEIGHTS_NAME,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
MODEL_CARD_NAME,
MULTIPLE_CHOICE_DUMMY_INPUTS,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
SENTENCEPIECE_UNDERLINE,
SPIECE_UNDERLINE,
TF2_WEIGHTS_NAME,
TF_WEIGHTS_NAME,
TORCH_FX_REQUIRED_VERSION,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
USE_JAX,
USE_TF,
USE_TORCH,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
ContextManagers,
DummyObject,
EntryNotFoundError,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
TensorType,
_LazyModule,
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
cached_property,
copy_func,
default_cache_path,
define_sagemaker_information,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
get_torch_version,
has_file,
http_user_agent,
is_apex_available,
    is_bs4_available,
is_coloredlogs_available,
is_datasets_available,
    is_detectron2_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_librosa_available,
is_offline_mode,
is_onnx_available,
is_pandas_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
    is_py3nvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_tensor,
is_tensorflow_probability_available,
    is_tf2onnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
    is_torch_bf16_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
    is_torch_tf32_available,
is_torch_tpu_available,
is_torchaudio_available,
is_training_run_on_sagemaker,
is_vision_available,
replace_return_docstrings,
requires_backends,
to_numpy,
to_py_obj,
torch_only_method,
)
| 287 | 0 |
import json
import os
from datetime import date
from pathlib import Path
from tabulate import DataRow, TableFormat, tabulate
hf_table_format = TableFormat(
lineabove=None,
linebelowheader=None,
linebetweenrows=None,
linebelow=None,
headerrow=DataRow('''''', '''|''', '''|'''),
datarow=DataRow('''''', '''|''', '''|'''),
padding=1,
with_header_hide=None,
)
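
# Illustration: `hf_table_format` renders Slack-friendly monospace tables, e.g.
#   tabulate([["tests/test_a.py", 2]], headers=["Test Location", "Num Failed"], tablefmt=hf_table_format)
# produces (roughly):
#   Test Location   | Num Failed |
#   tests/test_a.py | 2          |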
failed = []
group_info = []

no_error_payload = {
    "type": "section",
    "text": {"type": "plain_text", "text": "No failed tests! 🤗", "emoji": True},
}

payload = [
    {
        "type": "header",
        "text": {
            "type": "plain_text",
            "text": f"🤗 Accelerate nightly {os.environ.get('TEST_TYPE', '')} test results",
            "emoji": True,
        },
    }
]
total_num_failed = 0
for log in Path().glob("*.log"):
    section_num_failed = 0
    with open(log, "r") as f:
        for line in f:
            line = json.loads(line)
            if line.get("nodeid", "") != "":
                test = line["nodeid"]
                if line.get("duration", None) is not None:
                    duration = f'{line["duration"]:.4f}'
                    if line.get("outcome", "") == "failed":
                        section_num_failed += 1
                        failed.append([test, duration, log.name.split("_")[0]])
                        total_num_failed += 1
    group_info.append([str(log), section_num_failed, failed])
    failed = []
    log.unlink()
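
# Illustration: each `*.log` file is expected to hold pytest `--report-log`
# style JSON records, one per line; the fields read above look roughly like:
#   {"nodeid": "tests/test_foo.py::test_bar", "duration": 0.0123, "outcome": "failed"}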
message = ""
all_filesafailed = []
if total_num_failed > 0:
for name, num_failed, failed_tests in group_info:
if num_failed > 0:
if num_failed == 1:
message += F"*{name[1:]}: {num_failed} failed test*\n"
else:
message += F"*{name[1:]}: {num_failed} failed tests*\n"
            failed_table = []
            filesafailed = {}
            for test in failed_tests:
                data = test[0].split("::")
                data[0] = data[0].split("/")[-1]
                if data[0] not in filesafailed:
                    filesafailed[data[0]] = [data[1:]]
                else:
                    filesafailed[data[0]] += [data[1:]]
                failed_table.append(data)

            files = [test[0] for test in failed_table]
            individual_files = list(set(files))
            # Count number of instances in failed_tests
            table = []
            for file in individual_files:
                table.append([file, len(filesafailed[file])])

            failed_table = tabulate(
                table,
                headers=["Test Location", "Num Failed"],
                tablefmt=hf_table_format,
                stralign="right",
            )
message += F"\n```\n{failed_table}\n```"
all_filesafailed.append(filesafailed)
    if len(message) > 3000:
        err = "Too many failed tests, please see the full report in the Action results."
        offset = len(err) + 10
        message = message[: 3000 - offset] + f"\n...\n```\n{err}"
print(F'''### {message}''')
else:
    message = "No failed tests! 🤗"
print(F'''## {message}''')
payload.append(no_error_payload)
if os.environ.get('''TEST_TYPE''', '''''') != "":
from slack_sdk import WebClient
    client = WebClient(token=os.environ["SLACK_API_TOKEN"])
if message != "No failed tests! 🤗":
        md_report = {
'''type''': '''section''',
'''text''': {
'''type''': '''mrkdwn''',
'''text''': message,
},
}
payload.append(md_report)
        action_button = {
'''type''': '''section''',
'''text''': {
'''type''': '''mrkdwn''',
'''text''': '''*For more details:*''',
},
'''accessory''': {
'''type''': '''button''',
'''text''': {
'''type''': '''plain_text''',
'''text''': '''Check Action results''',
'''emoji''': True,
},
'''url''': F'''https://github.com/{os.environ["GITHUB_REPOSITORY"]}/actions/runs/{os.environ["GITHUB_RUN_ID"]}''',
},
}
payload.append(action_button)
        date_report = {
'''type''': '''context''',
'''elements''': [
{
'''type''': '''plain_text''',
'''text''': F'''Nightly {os.environ.get("TEST_TYPE")} test results for {date.today()}''',
}
],
}
payload.append(date_report)
        response = client.chat_postMessage(channel="#accelerate-ci-daily", text=message, blocks=payload)
        ts = response.data["ts"]
for failed_file in all_filesafailed:
for test_location, test_failures in failed_file.items():
# Keep only the first instance of the test name
                test_class = ""
for i, row in enumerate(test_failures):
if row[0] != test_class:
                        test_class = row[0]
else:
                        row[0] = ""
                payload = {
'''type''': '''section''',
'''text''': {
'''type''': '''mrkdwn''',
'''text''': F'''Test location: {test_location}\n```\n{tabulate(test_failures, headers=["Class", "Test"], tablefmt=hf_table_format, stralign="right")}\n```''',
},
}
client.chat_postMessage(
channel='''#accelerate-ci-daily''',
thread_ts=ts,
blocks=[payload],
)
| 38 |
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def check_model_parameters(model_a, model_b, did_step, iteration):
    for param, grad_param in zip(model_a.parameters(), model_b.parameters()):
        if not param.requires_grad:
            continue
        if not did_step:
            # Grads should not be in sync
            assert (
                torch.allclose(param.grad, grad_param.grad) is False
            ), f"Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})"
        else:
            # Grads should be in sync
            assert (
                torch.allclose(param.grad, grad_param.grad) is True
            ), f"Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})"
def step_model(model, input, target, accelerator, do_backward=False):
    model.train()
    output = model(input)
    loss = F.mse_loss(output, target.to(output.device))
    if not do_backward:
        loss /= accelerator.gradient_accumulation_steps
        loss.backward()
    else:
        accelerator.backward(loss)
def get_training_setup(accelerator, sched=False):
    "Returns everything needed to perform basic training"
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=80)
    dataloader = DataLoader(dset, batch_size=16)
    model.to(accelerator.device)
    if sched:
        opt = AdamW(params=model.parameters(), lr=1e-3)
        ddp_opt = AdamW(params=ddp_model.parameters(), lr=1e-3)
        sched = LambdaLR(opt, lr_lambda=lambda epoch: epoch**0.65)
        ddp_sched = LambdaLR(ddp_opt, lr_lambda=lambda epoch: epoch**0.65)
    # Make a copy of `model`
    if sched:
        ddp_model, ddp_opt, ddp_sched, dataloader = accelerator.prepare(ddp_model, ddp_opt, ddp_sched, dataloader)
    else:
        ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    if sched:
        return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
    return model, ddp_model, dataloader
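
# Illustration: typical use of the helper above.
#   accelerator = Accelerator()
#   model, ddp_model, dataloader = get_training_setup(accelerator)
# or, with optimizers and schedulers prepared as well:
#   model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched = get_training_setup(accelerator, sched=True)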
def test_noop_sync(accelerator):
    # Test when on a single CPU or GPU that the context manager does nothing
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)

        # Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
        check_model_parameters(model, ddp_model, True, iteration)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            assert torch.allclose(
                param.grad, ddp_param.grad
            ), f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
def test_distributed_sync(accelerator):
    # Test on distributed setup that context manager behaves properly
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)

        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if iteration % 2 == 0:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), f"Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"
            else:
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
def test_gradient_accumulation(split_batches=False, dispatch_batches=False):
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2
    )
    # Test that context manager behaves properly
    model, ddp_model, dataloader = get_training_setup(accelerator)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator, False)
        # Do "gradient accumulation" (noop)
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)

        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if ((iteration + 1) % 2 == 0) or (iteration == len(dataloader) - 1):
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), f"Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"
            else:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), f"Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
    GradientState._reset_state()
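
# Illustration: the `accelerator.accumulate(model)` context used above only
# synchronizes gradients every `gradient_accumulation_steps` batches; a minimal
# training-loop sketch (hypothetical `loss_fn` and batch layout):
#   for batch in dataloader:
#       with accelerator.accumulate(model):
#           loss = loss_fn(model(batch["x"]), batch["y"])
#           accelerator.backward(loss)
#           optimizer.step()
#           optimizer.zero_grad()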
def test_gradient_accumulation_with_opt_and_scheduler(split_batches=False, dispatch_batches=False):
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2
    )
    # Test that context manager behaves properly
    model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched = get_training_setup(accelerator, True)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        model.train()
        ddp_model.train()
        step_model(model, input, target, accelerator, False)
        opt.step()

        if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(dataloader)):
            if split_batches:
                sched.step()
            else:
                for _ in range(accelerator.num_processes):
                    sched.step()
        opt.zero_grad()
        # Perform gradient accumulation under wrapper
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
            ddp_opt.step()
            ddp_sched.step()
            ddp_opt.zero_grad()

        # Learning rates should be the same
        assert (
            opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
        ), f'Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]["lr"]}\nDDP opt: {ddp_opt.param_groups[0]["lr"]}\n'
        did_step = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(dataloader))
        if accelerator.num_processes > 1:
            check_model_parameters(model, ddp_model, did_step, iteration)
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
    GradientState._reset_state()
def test_dataloader_break():
    accelerator = Accelerator()

    first_dset = RegressionDataset(length=80)
    first_dataloader = DataLoader(first_dset, batch_size=16)
    second_dset = RegressionDataset(length=96)
    second_dataloader = DataLoader(second_dset, batch_size=16)
    first_dataloader, second_dataloader = accelerator.prepare(first_dataloader, second_dataloader)
    assert accelerator.gradient_state.active_dataloader is None
    for iteration, _ in enumerate(first_dataloader):
        assert id(accelerator.gradient_state.active_dataloader) == id(first_dataloader)
        if iteration < len(first_dataloader) - 1:
            assert not accelerator.gradient_state.end_of_dataloader
            if iteration == 1:
                for batch_num, _ in enumerate(second_dataloader):
                    assert id(accelerator.gradient_state.active_dataloader) == id(second_dataloader)
                    if batch_num < len(second_dataloader) - 1:
                        assert not accelerator.gradient_state.end_of_dataloader
                    else:
                        assert accelerator.gradient_state.end_of_dataloader
        else:
            assert accelerator.gradient_state.end_of_dataloader
    assert accelerator.gradient_state.active_dataloader is None
def main():
    accelerator = Accelerator()
    state = accelerator.state
    if state.local_process_index == 0:
        print("**Test `accumulate` gradient accumulation with dataloader break**")
    test_dataloader_break()
    if state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print("**Test NOOP `no_sync` context manager**")
        test_noop_sync(accelerator)
    if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
        if state.local_process_index == 0:
            print("**Test Distributed `no_sync` context manager**")
        test_distributed_sync(accelerator)
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if state.local_process_index == 0:
                    print(
                        "**Test `accumulate` gradient accumulation, ",
                        f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**",
                    )
                test_gradient_accumulation(split_batch, dispatch_batches)

    # Currently will break on torch 2.0 +, need to investigate why
    if is_torch_version("<", "2.0") or state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print(
                "**Test `accumulate` gradient accumulation with optimizer and scheduler, ",
                "`split_batches=False`, `dispatch_batches=False`**",
            )
        test_gradient_accumulation_with_opt_and_scheduler()
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if not split_batch and not dispatch_batches:
                    continue
                if state.local_process_index == 0:
                    print(
                        "**Test `accumulate` gradient accumulation with optimizer and scheduler, ",
                        f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**",
                    )
                test_gradient_accumulation_with_opt_and_scheduler(split_batch, dispatch_batches)
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
| 287 | 0 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "monolingual_vocab_file": "dict.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''vinai/bartpho-syllable''': '''https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model''',
},
'''monolingual_vocab_file''': {
'''vinai/bartpho-syllable''': '''https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt''',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"vinai/bartpho-syllable": 1024}
class BartphoTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(self, vocab_file, monolingual_vocab_file, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs):
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs,
        )

        self.vocab_file = vocab_file
        self.monolingual_vocab_file = monolingual_vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))

        # Load the reduced vocab
        # Keep order of special tokens for backward compatibility
        self.fairseq_tokens_to_ids = {}
        cnt = 0
        for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
            if str(token) not in self.fairseq_tokens_to_ids:
                self.fairseq_tokens_to_ids[str(token)] = cnt
                cnt += 1
        with open(monolingual_vocab_file, "r", encoding="utf-8") as f:
            for line in f.readlines():
                token = line.strip().split()[0]
                self.fairseq_tokens_to_ids[token] = len(self.fairseq_tokens_to_ids)
        if str(mask_token) not in self.fairseq_tokens_to_ids:
            self.fairseq_tokens_to_ids[str(mask_token)] = len(self.fairseq_tokens_to_ids)

        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    @property
    def vocab_size(self):
        return len(self.fairseq_ids_to_tokens)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        else:
            return self.unk_token_id

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.fairseq_ids_to_tokens[index]

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (sub-word strings) into a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        out_monolingual_vocab_file = os.path.join(
            save_directory,
            (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["monolingual_vocab_file"],
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        if os.path.abspath(self.monolingual_vocab_file) != os.path.abspath(
            out_monolingual_vocab_file
        ) and os.path.isfile(self.monolingual_vocab_file):
            copyfile(self.monolingual_vocab_file, out_monolingual_vocab_file)
        elif not os.path.isfile(self.monolingual_vocab_file):
            with open(out_monolingual_vocab_file, "w", encoding="utf-8") as fp:
                for token in self.fairseq_tokens_to_ids:
                    if token not in self.all_special_tokens:
                        fp.write(f"{str(token)} \n")

        return out_vocab_file, out_monolingual_vocab_file
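
# Illustration (hypothetical local file paths and sample sentence):
#   tokenizer = BartphoTokenizer("sentencepiece.bpe.model", "dict.txt")
#   ids = tokenizer("Chúng tôi là những nghiên cứu viên.")["input_ids"]
#   tokenizer.decode(ids, skip_special_tokens=True)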
| 39 |
from scipy.stats import pearsonr
import datasets
_lowerCamelCase ="""
Pearson correlation coefficient and p-value for testing non-correlation.
The Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.
The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.
"""
_lowerCamelCase ="""
Args:
predictions (`list` of `int`): Predicted class labels, as returned by a model.
references (`list` of `int`): Ground truth labels.
return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.
Returns:
pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.
    p-value (`float`): P-value, which roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.
Examples:
Example 1-A simple example using only predictions and references.
>>> pearsonr_metric = datasets.load_metric(\"pearsonr\")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])
>>> print(round(results['pearsonr'], 2))
-0.74
Example 2-The same as Example 1, but that also returns the `p-value`.
>>> pearsonr_metric = datasets.load_metric(\"pearsonr\")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)
>>> print(sorted(list(results.keys())))
['p-value', 'pearsonr']
>>> print(round(results['pearsonr'], 2))
-0.74
>>> print(round(results['p-value'], 2))
0.15
"""
_lowerCamelCase ="""
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, Ilhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Antonio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class Pearsonr(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("float"),
                    "references": datasets.Value("float"),
                }
            ),
            reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html"],
        )

    def _compute(self, predictions, references, return_pvalue=False):
        if return_pvalue:
            results = pearsonr(references, predictions)
            return {"pearsonr": results[0], "p-value": results[1]}
        else:
            return {"pearsonr": float(pearsonr(references, predictions)[0])}
| 287 | 0 |
"""simple docstring"""
import inspect
import unittest
from transformers import ConvNextVaConfig
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel
from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class ConvNextVaModelTester:
    def __init__(self, parent, batch_size=13, image_size=32, num_channels=3, num_stages=4, hidden_sizes=[10, 20, 30, 40], depths=[2, 2, 3, 2], is_training=True, use_labels=True, intermediate_size=37, hidden_act="gelu", num_labels=10, initializer_range=0.02, out_features=["stage2", "stage3", "stage4"], out_indices=[2, 3, 4], scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.out_indices = out_indices
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels
    def get_config(self):
        return ConvNextVaConfig(
            num_channels=self.num_channels, hidden_sizes=self.hidden_sizes, depths=self.depths, num_stages=self.num_stages, hidden_act=self.hidden_act, is_decoder=False, initializer_range=self.initializer_range, out_features=self.out_features, out_indices=self.out_indices, num_labels=self.num_labels,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = ConvNextVaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        model = ConvNextVaForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_backbone(self, config, pixel_values, labels):
        model = ConvNextVaBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify hidden states
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])

        # verify backbone works with out_features=None
        config.out_features = None
        model = ConvNextVaBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict

    def prepare_config_and_inputs_with_labels(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values, "labels": labels}
        return config, inputs_dict
@require_torch
class ConvNextVaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            ConvNextVaModel,
            ConvNextVaForImageClassification,
            ConvNextVaBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": ConvNextVaModel, "image-classification": ConvNextVaForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = ConvNextVaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvNextVaConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="ConvNextV2 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ConvNextV2 does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="ConvNextV2 does not use feedforward chunking")
    def test_feed_forward_chunking(self):
        pass
    def test_training(self):
        if not self.model_tester.is_training:
            return

        for model_class in self.all_model_classes:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_with_labels()
            config.return_dict = True

            if model_class.__name__ in [
                *get_values(MODEL_MAPPING_NAMES),
                *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES),
            ]:
                continue

            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()
    def test_training_gradient_checkpointing(self):
        if not self.model_tester.is_training:
            return

        for model_class in self.all_model_classes:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_with_labels()
            config.use_cache = False
            config.return_dict = True

            if (
                model_class.__name__
                in [*get_values(MODEL_MAPPING_NAMES), *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES)]
                or not model_class.supports_gradient_checkpointing
            ):
                continue
            model = model_class(config)
            model.to(torch_device)
            model.gradient_checkpointing_enable()
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ConvNextVaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class ConvNextVaModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("facebook/convnextv2-tiny-1k-224") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = ConvNextVaForImageClassification.from_pretrained("facebook/convnextv2-tiny-1k-224").to(torch_device)

        preprocessor = self.default_image_processor
        image = prepare_img()
        inputs = preprocessor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([0.9996, 0.1966, -0.4386]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 40 |
import copy
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""microsoft/conditional-detr-resnet-50""": (
"""https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json"""
),
}
class ConditionalDetrConfig(PretrainedConfig):
    model_type = "conditional_detr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(self, use_timm_backbone=True, backbone_config=None, num_channels=3, num_queries=300, encoder_layers=6, encoder_ffn_dim=2048, encoder_attention_heads=8, decoder_layers=6, decoder_ffn_dim=2048, decoder_attention_heads=8, encoder_layerdrop=0.0, decoder_layerdrop=0.0, is_encoder_decoder=True, activation_function="relu", d_model=256, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, init_xavier_std=1.0, auxiliary_loss=False, position_embedding_type="sine", backbone="resnet50", use_pretrained_backbone=True, dilation=False, class_cost=2, bbox_cost=5, giou_cost=2, mask_loss_coefficient=1, dice_loss_coefficient=1, cls_loss_coefficient=2, bbox_loss_coefficient=5, giou_loss_coefficient=2, focal_alpha=0.25, **kwargs):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.cls_loss_coefficient = cls_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
class ConditionalDetrOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
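
# Illustration: inspecting the ONNX export surface defined above.
#   config = ConditionalDetrConfig()
#   onnx_config = ConditionalDetrOnnxConfig(config)
#   list(onnx_config.inputs)        # ["pixel_values", "pixel_mask"]
#   onnx_config.default_onnx_opset  # 12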
| 287 | 0 |
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roberta import RobertaTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/vocab.json''',
'''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/vocab.json''',
'''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json''',
'''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/vocab.json''',
'''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json''',
'''roberta-large-openai-detector''': (
'''https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json'''
),
},
'''merges_file''': {
'''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/merges.txt''',
'''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/merges.txt''',
'''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt''',
'''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/merges.txt''',
'''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt''',
'''roberta-large-openai-detector''': (
'''https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt'''
),
},
'''tokenizer_file''': {
'''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/tokenizer.json''',
'''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/tokenizer.json''',
'''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json''',
'''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json''',
'''roberta-base-openai-detector''': (
'''https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json'''
),
'''roberta-large-openai-detector''': (
'''https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json'''
),
},
}
_A : Optional[Any] ={
'''roberta-base''': 512,
'''roberta-large''': 512,
'''roberta-large-mnli''': 512,
'''distilroberta-base''': 512,
'''roberta-base-openai-detector''': 512,
'''roberta-large-openai-detector''': 512,
}
class RobertaTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = RobertaTokenizer
    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False

            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True

            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)
    @property
    def mask_token(self):
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)
    @mask_token.setter
    def mask_token(self, value):
        # The mask token behaves like a normal word, i.e. it includes the space before it.
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value
    def _batch_encode_plus(self, *args, **kwargs):
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs):
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)
    def save_vocabulary(self, save_directory, filename_prefix=None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
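# Usage sketch (requires network access to the Hub; "roberta-base" is one of the
# checkpoints mapped above):
#
#   tokenizer = RobertaTokenizerFast.from_pretrained("roberta-base")
#   ids = tokenizer("Hello world")["input_ids"]
#   # ids begins with the <s> (bos) id and ends with the </s> (eos) id, matching
#   # build_inputs_with_special_tokens above.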
| 41 |
import json
import sys
def format_json_to_md(input_json_file, output_md_file):
    with open(input_json_file, encoding="utf-8") as f:
        results = json.load(f)

    output_md = ["<details>", "<summary>Show updated benchmarks!</summary>", " "]
    for benchmark_name in sorted(results):
        benchmark_res = results[benchmark_name]
        benchmark_file_name = benchmark_name.split("/")[-1]
        output_md.append(f"### Benchmark: {benchmark_file_name}")

        title = "| metric |"
        lines = "|--------|"
        value = "| new / old (diff) |"
        for metric_name in sorted(benchmark_res):
            metric_vals = benchmark_res[metric_name]
            new_val = metric_vals["new"]
            old_val = metric_vals.get("old", None)
            dif_val = metric_vals.get("diff", None)
            val_str = f" {new_val:f}" if isinstance(new_val, (int, float)) else "None"
            if old_val is not None:
                val_str += f" / {old_val:f}" if isinstance(old_val, (int, float)) else "None"
            if dif_val is not None:
                val_str += f" ({dif_val:f})" if isinstance(dif_val, (int, float)) else "None"
            title += " " + metric_name + " |"
            lines += "---|"
            value += val_str + " |"
        output_md += [title, lines, value, " "]

    output_md.append("</details>")

    with open(output_md_file, "w", encoding="utf-8") as f:
        f.writelines("\n".join(output_md))
if __name__ == "__main__":
    input_json_file = sys.argv[1]
    output_md_file = sys.argv[2]
format_json_to_md(input_json_file, output_md_file)
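# Expected input shape (illustrative): a mapping from benchmark path to metric dicts, e.g.
# {"benchmarks/infer.json": {"latency_ms": {"new": 1.2, "old": 1.5, "diff": -0.3}}};
# the "old" and "diff" entries are optional and rendered only when present.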
| 287 | 0 |
'''simple docstring'''
import os
import shutil
import sys
import tempfile
import unittest
from pathlib import Path
import pytest
import transformers
from transformers import (
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoTokenizer,
BertConfig,
BertTokenizer,
BertTokenizerFast,
CTRLTokenizer,
    GPT2Tokenizer,
    GPT2TokenizerFast,
PreTrainedTokenizerFast,
RobertaTokenizer,
RobertaTokenizerFast,
is_tokenizers_available,
)
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.auto.tokenization_auto import (
TOKENIZER_MAPPING,
get_tokenizer_config,
tokenizer_class_from_name,
)
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import (
DUMMY_DIFF_TOKENIZER_IDENTIFIER,
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tokenizers,
slow,
)
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class __UpperCAmelCase ( unittest.TestCase ):
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = 0
@slow
def lowerCamelCase ( self ):
"""simple docstring"""
        for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x):
            tokenizer = AutoTokenizer.from_pretrained(model_name)
            self.assertIsNotNone(tokenizer)
            self.assertIsInstance(tokenizer, (BertTokenizer, BertTokenizerFast))
            self.assertGreater(len(tokenizer), 0)
        for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys():
            tokenizer = AutoTokenizer.from_pretrained(model_name)
            self.assertIsNotNone(tokenizer)
            self.assertIsInstance(tokenizer, (GPT2Tokenizer, GPT2TokenizerFast))
            self.assertGreater(len(tokenizer), 0)
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = AutoTokenizer.from_pretrained(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 12 )
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = AutoTokenizer.from_pretrained(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , (RobertaTokenizer, RobertaTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 20 )
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = AutoConfig.from_pretrained(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
# Check that tokenizer_type ≠ model_type
_snake_case = AutoTokenizer.from_pretrained(lowerCAmelCase_ , config=lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 12 )
def lowerCamelCase ( self ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('./tests/fixtures/vocab.txt' , os.path.join(lowerCAmelCase_ , 'vocab.txt' ) )
_snake_case = AutoTokenizer.from_pretrained(lowerCAmelCase_ , tokenizer_type='bert' , use_fast=lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('./tests/fixtures/vocab.json' , os.path.join(lowerCAmelCase_ , 'vocab.json' ) )
shutil.copy('./tests/fixtures/merges.txt' , os.path.join(lowerCAmelCase_ , 'merges.txt' ) )
_snake_case = AutoTokenizer.from_pretrained(lowerCAmelCase_ , tokenizer_type='gpt2' , use_fast=lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
@require_tokenizers
def lowerCamelCase ( self ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('./tests/fixtures/vocab.txt' , os.path.join(lowerCAmelCase_ , 'vocab.txt' ) )
_snake_case = AutoTokenizer.from_pretrained(lowerCAmelCase_ , tokenizer_type='bert' )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('./tests/fixtures/vocab.json' , os.path.join(lowerCAmelCase_ , 'vocab.json' ) )
shutil.copy('./tests/fixtures/merges.txt' , os.path.join(lowerCAmelCase_ , 'merges.txt' ) )
_snake_case = AutoTokenizer.from_pretrained(lowerCAmelCase_ , tokenizer_type='gpt2' )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
def lowerCamelCase ( self ):
"""simple docstring"""
with pytest.raises(lowerCAmelCase_ ):
AutoTokenizer.from_pretrained('./' , tokenizer_type='xxx' )
@require_tokenizers
def lowerCamelCase ( self ):
"""simple docstring"""
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
_snake_case = tokenizer_class.from_pretrained('wietsedv/bert-base-dutch-cased' )
self.assertIsInstance(lowerCAmelCase_ , (BertTokenizer, BertTokenizerFast) )
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
self.assertEqual(tokenizer.basic_tokenizer.do_lower_case , lowerCAmelCase_ )
else:
self.assertEqual(tokenizer.do_lower_case , lowerCAmelCase_ )
self.assertEqual(tokenizer.model_max_length , 5_12 )
@require_tokenizers
def lowerCamelCase ( self ):
"""simple docstring"""
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
with self.assertRaisesRegex(
lowerCAmelCase_ , 'julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier' , ):
_snake_case = tokenizer_class.from_pretrained('julien-c/herlolip-not-exists' )
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = TOKENIZER_MAPPING.values()
_snake_case = []
for slow_tok, fast_tok in tokenizers:
if slow_tok is not None:
tokenizer_names.append(slow_tok.__name__ )
if fast_tok is not None:
tokenizer_names.append(fast_tok.__name__ )
for tokenizer_name in tokenizer_names:
# must find the right class
tokenizer_class_from_name(lowerCAmelCase_ )
@require_tokenizers
def lowerCamelCase ( self ):
"""simple docstring"""
self.assertIsInstance(AutoTokenizer.from_pretrained('bert-base-cased' , use_fast=lowerCAmelCase_ ) , lowerCAmelCase_ )
self.assertIsInstance(AutoTokenizer.from_pretrained('bert-base-cased' ) , lowerCAmelCase_ )
@require_tokenizers
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = AutoTokenizer.from_pretrained('distilbert-base-uncased' , do_lower_case=lowerCAmelCase_ )
_snake_case = 'Hello, world. How are you?'
_snake_case = tokenizer.tokenize(lowerCAmelCase_ )
self.assertEqual('[UNK]' , tokens[0] )
_snake_case = AutoTokenizer.from_pretrained('microsoft/mpnet-base' , do_lower_case=lowerCAmelCase_ )
_snake_case = tokenizer.tokenize(lowerCAmelCase_ )
self.assertEqual('[UNK]' , tokens[0] )
@require_tokenizers
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = AutoTokenizer.from_pretrained('robot-test/dummy-tokenizer-fast-with-model-config' )
self.assertEqual(type(lowerCAmelCase_ ) , lowerCAmelCase_ )
self.assertEqual(tokenizer.model_max_length , 5_12 )
self.assertEqual(tokenizer.vocab_size , 3_00_00 )
self.assertEqual(tokenizer.unk_token , '[UNK]' )
self.assertEqual(tokenizer.padding_side , 'right' )
self.assertEqual(tokenizer.truncation_side , 'right' )
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = AutoTokenizer.from_pretrained(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , (BertTokenizer, BertTokenizerFast) )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowerCAmelCase_ )
_snake_case = AutoTokenizer.from_pretrained(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , tokenizer.__class__ )
self.assertEqual(tokenizera.vocab_size , 12 )
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = AutoTokenizer.from_pretrained('ctrl' )
# There is no fast CTRL so this always gives us a slow tokenizer.
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = get_tokenizer_config('bert-base-cased' )
_snake_case = config.pop('_commit_hash' , lowerCAmelCase_ )
# If we ever update bert-base-cased tokenizer config, this dict here will need to be updated.
self.assertEqual(lowerCAmelCase_ , {'do_lower_case': False} )
# This model does not have a tokenizer_config so we get back an empty dict.
_snake_case = get_tokenizer_config(lowerCAmelCase_ )
self.assertDictEqual(lowerCAmelCase_ , {} )
# A tokenizer saved with `save_pretrained` always creates a tokenizer config.
_snake_case = AutoTokenizer.from_pretrained(lowerCAmelCase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowerCAmelCase_ )
_snake_case = get_tokenizer_config(lowerCAmelCase_ )
# Check the class of the tokenizer was properly saved (note that it always saves the slow class).
self.assertEqual(config['tokenizer_class'] , 'BertTokenizer' )
def lowerCamelCase ( self ):
"""simple docstring"""
try:
AutoConfig.register('custom' , lowerCAmelCase_ )
AutoTokenizer.register(lowerCAmelCase_ , slow_tokenizer_class=lowerCAmelCase_ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(lowerCAmelCase_ ):
AutoTokenizer.register(lowerCAmelCase_ , slow_tokenizer_class=lowerCAmelCase_ )
_snake_case = CustomTokenizer.from_pretrained(lowerCAmelCase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowerCAmelCase_ )
_snake_case = AutoTokenizer.from_pretrained(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
@require_tokenizers
def lowerCamelCase ( self ):
"""simple docstring"""
try:
AutoConfig.register('custom' , lowerCAmelCase_ )
# Can register in two steps
AutoTokenizer.register(lowerCAmelCase_ , slow_tokenizer_class=lowerCAmelCase_ )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, None) )
AutoTokenizer.register(lowerCAmelCase_ , fast_tokenizer_class=lowerCAmelCase_ )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
del TOKENIZER_MAPPING._extra_content[CustomConfig]
# Can register in one step
AutoTokenizer.register(
lowerCAmelCase_ , slow_tokenizer_class=lowerCAmelCase_ , fast_tokenizer_class=lowerCAmelCase_ )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(lowerCAmelCase_ ):
AutoTokenizer.register(lowerCAmelCase_ , fast_tokenizer_class=lowerCAmelCase_ )
# We pass through a bert tokenizer fast cause there is no converter slow to fast for our new toknizer
# and that model does not have a tokenizer.json
with tempfile.TemporaryDirectory() as tmp_dir:
_snake_case = BertTokenizerFast.from_pretrained(lowerCAmelCase_ )
bert_tokenizer.save_pretrained(lowerCAmelCase_ )
_snake_case = CustomTokenizerFast.from_pretrained(lowerCAmelCase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowerCAmelCase_ )
_snake_case = AutoTokenizer.from_pretrained(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
_snake_case = AutoTokenizer.from_pretrained(lowerCAmelCase_ , use_fast=lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def lowerCamelCase ( self ):
"""simple docstring"""
with self.assertRaises(lowerCAmelCase_ ):
_snake_case = AutoTokenizer.from_pretrained('hf-internal-testing/test_dynamic_tokenizer' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(lowerCAmelCase_ ):
_snake_case = AutoTokenizer.from_pretrained(
'hf-internal-testing/test_dynamic_tokenizer' , trust_remote_code=lowerCAmelCase_ )
_snake_case = AutoTokenizer.from_pretrained('hf-internal-testing/test_dynamic_tokenizer' , trust_remote_code=lowerCAmelCase_ )
self.assertTrue(tokenizer.special_attribute_present )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowerCAmelCase_ )
_snake_case = AutoTokenizer.from_pretrained(lowerCAmelCase_ , trust_remote_code=lowerCAmelCase_ )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizerFast' )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , 'NewTokenizerFast' )
# Test we can also load the slow version
_snake_case = AutoTokenizer.from_pretrained(
'hf-internal-testing/test_dynamic_tokenizer' , trust_remote_code=lowerCAmelCase_ , use_fast=lowerCAmelCase_ )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizer' )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowerCAmelCase_ )
_snake_case = AutoTokenizer.from_pretrained(lowerCAmelCase_ , trust_remote_code=lowerCAmelCase_ , use_fast=lowerCAmelCase_ )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , 'NewTokenizer' )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
else:
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizer' )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , 'NewTokenizer' )
@require_tokenizers
def lowerCamelCase ( self ):
"""simple docstring"""
class __UpperCAmelCase ( _lowerCamelCase ):
__lowercase = False
class __UpperCAmelCase ( _lowerCamelCase ):
__lowercase = NewTokenizer
__lowercase = False
try:
AutoConfig.register('custom' , lowerCAmelCase_ )
AutoTokenizer.register(lowerCAmelCase_ , slow_tokenizer_class=lowerCAmelCase_ )
AutoTokenizer.register(lowerCAmelCase_ , fast_tokenizer_class=lowerCAmelCase_ )
# If remote code is not set, the default is to use local
_snake_case = AutoTokenizer.from_pretrained('hf-internal-testing/test_dynamic_tokenizer' )
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizerFast' )
self.assertFalse(tokenizer.special_attribute_present )
_snake_case = AutoTokenizer.from_pretrained('hf-internal-testing/test_dynamic_tokenizer' , use_fast=lowerCAmelCase_ )
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizer' )
self.assertFalse(tokenizer.special_attribute_present )
# If remote code is disabled, we load the local one.
_snake_case = AutoTokenizer.from_pretrained(
'hf-internal-testing/test_dynamic_tokenizer' , trust_remote_code=lowerCAmelCase_ )
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizerFast' )
self.assertFalse(tokenizer.special_attribute_present )
_snake_case = AutoTokenizer.from_pretrained(
'hf-internal-testing/test_dynamic_tokenizer' , trust_remote_code=lowerCAmelCase_ , use_fast=lowerCAmelCase_ )
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizer' )
self.assertFalse(tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub
_snake_case = AutoTokenizer.from_pretrained(
'hf-internal-testing/test_dynamic_tokenizer' , trust_remote_code=lowerCAmelCase_ )
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizerFast' )
self.assertTrue(tokenizer.special_attribute_present )
_snake_case = AutoTokenizer.from_pretrained(
'hf-internal-testing/test_dynamic_tokenizer' , trust_remote_code=lowerCAmelCase_ , use_fast=lowerCAmelCase_ )
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizer' )
self.assertTrue(tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = AutoTokenizer.from_pretrained(
'hf-internal-testing/test_dynamic_tokenizer_legacy' , trust_remote_code=lowerCAmelCase_ )
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizerFast' )
# Test we can also load the slow version
_snake_case = AutoTokenizer.from_pretrained(
'hf-internal-testing/test_dynamic_tokenizer_legacy' , trust_remote_code=lowerCAmelCase_ , use_fast=lowerCAmelCase_ )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizer' )
else:
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizer' )
def lowerCamelCase ( self ):
"""simple docstring"""
with self.assertRaisesRegex(
lowerCAmelCase_ , 'bert-base is not a local folder and is not a valid model identifier' ):
_snake_case = AutoTokenizer.from_pretrained('bert-base' )
def lowerCamelCase ( self ):
"""simple docstring"""
with self.assertRaisesRegex(
lowerCAmelCase_ , R'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)' ):
_snake_case = AutoTokenizer.from_pretrained(lowerCAmelCase_ , revision='aaaaaa' )
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-bert' )
with RequestCounter() as counter:
_snake_case = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-bert' )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
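# The final test above asserts the Hub cache behaviour: a second `from_pretrained` call
# issues exactly one HEAD request (to check for updates) and zero GET requests, since
# the tokenizer files are already cached locally.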
| 42 |
def reverse_long_words(sentence: str) -> str:
    # Reverse each word longer than 4 characters; keep shorter words unchanged.
    return " ".join("".join(word[::-1]) if len(word) > 4 else word for word in sentence.split())
if __name__ == "__main__":
import doctest
doctest.testmod()
print(reverse_long_words("""Hey wollef sroirraw"""))
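# Worked example: "Hey wollef sroirraw" -> "Hey fellow warriors".
# "Hey" (3 characters) passes through, while the 6- and 8-character words are reversed.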
| 287 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'''configuration_perceiver''': ['''PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''PerceiverConfig''', '''PerceiverOnnxConfig'''],
'''tokenization_perceiver''': ['''PerceiverTokenizer'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_perceiver"] = ["PerceiverFeatureExtractor"]
    _import_structure["image_processing_perceiver"] = ["PerceiverImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_perceiver"] = [
'''PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''PerceiverForImageClassificationConvProcessing''',
'''PerceiverForImageClassificationFourier''',
'''PerceiverForImageClassificationLearned''',
'''PerceiverForMaskedLM''',
'''PerceiverForMultimodalAutoencoding''',
'''PerceiverForOpticalFlow''',
'''PerceiverForSequenceClassification''',
'''PerceiverLayer''',
'''PerceiverModel''',
'''PerceiverPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig
from .tokenization_perceiver import PerceiverTokenizer
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_perceiver import PerceiverFeatureExtractor
from .image_processing_perceiver import PerceiverImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_perceiver import (
PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST,
PerceiverForImageClassificationConvProcessing,
PerceiverForImageClassificationFourier,
PerceiverForImageClassificationLearned,
PerceiverForMaskedLM,
PerceiverForMultimodalAutoencoding,
PerceiverForOpticalFlow,
PerceiverForSequenceClassification,
PerceiverLayer,
PerceiverModel,
PerceiverPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
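# Note: `_LazyModule` defers the imports registered in `_import_structure` until an
# attribute is first accessed, so `import transformers` stays fast even when the
# torch/vision extras backing the Perceiver models are installed.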
| 43 |
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from packaging import version
from .. import __version__
from .constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD
from .doc import (
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
copy_func,
replace_return_docstrings,
)
from .generic import (
ContextManagers,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
TensorType,
add_model_info_to_auto_map,
cached_property,
can_return_loss,
expand_dims,
find_labels,
flatten_dict,
infer_framework,
is_jax_tensor,
is_numpy_array,
is_tensor,
is_tf_symbolic_tensor,
is_tf_tensor,
is_torch_device,
is_torch_dtype,
is_torch_tensor,
reshape,
squeeze,
strtobool,
tensor_size,
to_numpy,
to_py_obj,
transpose,
working_or_temp_dir,
)
from .hub import (
CLOUDFRONT_DISTRIB_PREFIX,
DISABLE_TELEMETRY,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
EntryNotFoundError,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
cached_file,
default_cache_path,
define_sagemaker_information,
download_url,
extract_commit_hash,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
has_file,
http_user_agent,
is_offline_mode,
is_remote_url,
move_cache,
send_example_telemetry,
try_to_load_from_cache,
)
from .import_utils import (
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
TORCH_FX_REQUIRED_VERSION,
USE_JAX,
USE_TF,
USE_TORCH,
DummyObject,
OptionalDependencyNotAvailable,
_LazyModule,
ccl_version,
direct_transformers_import,
get_torch_version,
is_accelerate_available,
is_apex_available,
is_bitsandbytes_available,
    is_bs4_available,
is_coloredlogs_available,
is_cython_available,
is_datasets_available,
is_decord_available,
    is_detectron2_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_jieba_available,
is_jumanpp_available,
is_kenlm_available,
is_keras_nlp_available,
is_librosa_available,
is_natten_available,
is_ninja_available,
is_onnx_available,
is_openai_available,
is_optimum_available,
is_pandas_available,
is_peft_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytest_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sacremoses_available,
is_safetensors_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_sudachi_available,
is_tensorflow_probability_available,
is_tensorflow_text_available,
    is_tf2onnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
    is_torch_bf16_available,
    is_torch_bf16_cpu_available,
    is_torch_bf16_gpu_available,
is_torch_compile_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_neuroncore_available,
is_torch_tensorrt_fx_available,
    is_torch_tf32_available,
is_torch_tpu_available,
is_torchaudio_available,
is_torchdistx_available,
is_torchdynamo_available,
is_torchvision_available,
is_training_run_on_sagemaker,
is_vision_available,
requires_backends,
torch_only_method,
)
WEIGHTS_NAME = "pytorch_model.bin"
WEIGHTS_INDEX_NAME = "pytorch_model.bin.index.json"
ADAPTER_CONFIG_NAME = "adapter_config.json"
ADAPTER_WEIGHTS_NAME = "adapter_model.bin"
ADAPTER_SAFE_WEIGHTS_NAME = "adapter_model.safetensors"
TF2_WEIGHTS_NAME = "tf_model.h5"
TF2_WEIGHTS_INDEX_NAME = "tf_model.h5.index.json"
TF_WEIGHTS_NAME = "model.ckpt"
FLAX_WEIGHTS_NAME = "flax_model.msgpack"
FLAX_WEIGHTS_INDEX_NAME = "flax_model.msgpack.index.json"
SAFE_WEIGHTS_NAME = "model.safetensors"
SAFE_WEIGHTS_INDEX_NAME = "model.safetensors.index.json"
CONFIG_NAME = "config.json"
FEATURE_EXTRACTOR_NAME = "preprocessor_config.json"
IMAGE_PROCESSOR_NAME = FEATURE_EXTRACTOR_NAME
GENERATION_CONFIG_NAME = "generation_config.json"
MODEL_CARD_NAME = "modelcard.json"
SENTENCEPIECE_UNDERLINE = "▁"
SPIECE_UNDERLINE = SENTENCEPIECE_UNDERLINE  # Kept for backward compatibility
MULTIPLE_CHOICE_DUMMY_INPUTS = [
    [[0, 1, 0, 1], [1, 0, 0, 1]]
] * 2  # Needs to have 0s and 1s only since XLM uses it for langs too.
DUMMY_INPUTS = [[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]]
DUMMY_MASK = [[1, 1, 1, 1, 1], [1, 1, 1, 0, 0], [0, 0, 0, 1, 1]]
def check_min_version(min_version):
    if version.parse(__version__) < version.parse(min_version):
        if "dev" in min_version:
            error_message = (
                "This example requires a source install from HuggingFace Transformers (see "
                "`https://huggingface.co/docs/transformers/installation#install-from-source`),"
            )
        else:
            error_message = f"This example requires a minimum version of {min_version},"
        error_message += f" but the version found is {__version__}.\n"
        raise ImportError(
            error_message
            + "Check out https://github.com/huggingface/transformers/tree/main/examples#important-note for the examples corresponding to other "
            "versions of HuggingFace Transformers."
        )
| 287 | 0 |
"""simple docstring"""
import argparse
import json
import os
import torch
from transformers import LukeConfig, LukeModel, LukeTokenizer, RobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def convert_luke_checkpoint(checkpoint_path, metadata_path, entity_vocab_path, pytorch_dump_folder_path, model_size):
    # Load configuration defined in the metadata file
    with open(metadata_path) as metadata_file:
        metadata = json.load(metadata_file)
    config = LukeConfig(use_entity_aware_attention=True, **metadata["model_config"])

    # Load in the weights from the checkpoint_path
    state_dict = torch.load(checkpoint_path, map_location="cpu")

    # Load the entity vocab file
    entity_vocab = load_entity_vocab(entity_vocab_path)

    tokenizer = RobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"])
# Add special tokens to the token vocabulary for downstream tasks
    entity_token_1 = AddedToken("<ent>", lstrip=False, rstrip=False)
    entity_token_2 = AddedToken("<ent2>", lstrip=False, rstrip=False)
    tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_1, entity_token_2]})
config.vocab_size += 2
print(f"Saving tokenizer to {pytorch_dump_folder_path}" )
    tokenizer.save_pretrained(pytorch_dump_folder_path)
    with open(os.path.join(pytorch_dump_folder_path, LukeTokenizer.vocab_files_names["entity_vocab_file"]), "w") as f:
        json.dump(entity_vocab, f)
    tokenizer = LukeTokenizer.from_pretrained(pytorch_dump_folder_path)
# Initialize the embeddings of the special tokens
    word_emb = state_dict["embeddings.word_embeddings.weight"]
    ent_emb = word_emb[tokenizer.convert_tokens_to_ids(["@"])[0]].unsqueeze(0)
    ent2_emb = word_emb[tokenizer.convert_tokens_to_ids(["#"])[0]].unsqueeze(0)
    state_dict["embeddings.word_embeddings.weight"] = torch.cat([word_emb, ent_emb, ent2_emb])
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
            prefix = f"encoder.layer.{layer_index}.attention.self."
            state_dict[prefix + "w2e_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2w_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2e_" + matrix_name] = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
    entity_emb = state_dict["entity_embeddings.entity_embeddings.weight"]
    entity_emb[entity_vocab["[MASK2]"]] = entity_emb[entity_vocab["[MASK]"]]

    model = LukeModel(config=config).eval()

    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    if not (len(missing_keys) == 1 and missing_keys[0] == "embeddings.position_ids"):
        raise ValueError(f"Missing keys {', '.join(missing_keys)}. Expected only missing embeddings.position_ids")
if not (all(key.startswith("""entity_predictions""" ) or key.startswith("""lm_head""" ) for key in unexpected_keys )):
raise ValueError(
"""Unexpected keys"""
f" {', '.join([key for key in unexpected_keys if not (key.startswith('entity_predictions' ) or key.startswith('lm_head' ))] )}" )
# Check outputs
    tokenizer = LukeTokenizer.from_pretrained(pytorch_dump_folder_path, task="entity_classification")
    text = (
        "Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped the"
        " new world number one avoid a humiliating second- round exit at Wimbledon ."
    )
    span = (39, 42)
    encoding = tokenizer(text, entity_spans=[span], add_prefix_space=True, return_tensors="pt")

    outputs = model(**encoding)
# Verify word hidden states
    if model_size == "large":
        expected_shape = torch.Size((1, 42, 1024))
        expected_slice = torch.tensor(
            [[0.0133, 0.0865, 0.0095], [0.3093, -0.2576, -0.7418], [-0.1720, -0.2117, -0.2869]]
        )
    else:  # base
        expected_shape = torch.Size((1, 42, 768))
        expected_slice = torch.tensor([[0.0037, 0.1368, -0.0091], [0.1099, 0.3329, -0.1095], [0.0765, 0.5335, 0.1179]])
    if not (outputs.last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}"
        )
    if not torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Verify entity hidden states
    if model_size == "large":
        expected_shape = torch.Size((1, 1, 1024))
        expected_slice = torch.tensor([[0.0466, -0.0106, -0.0179]])
    else:  # base
        expected_shape = torch.Size((1, 1, 768))
        expected_slice = torch.tensor([[0.1457, 0.1044, 0.0174]])
    if not (outputs.entity_last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"
            f" {expected_shape}"
        )
    if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError
# Finally, save our PyTorch model and tokenizer
    print("Saving PyTorch model to {}".format(pytorch_dump_folder_path))
    model.save_pretrained(pytorch_dump_folder_path)
def load_entity_vocab(entity_vocab_path):
    entity_vocab = {}
    with open(entity_vocab_path, "r", encoding="utf-8") as f:
        for index, line in enumerate(f):
            title, _ = line.rstrip().split("\t")
            entity_vocab[title] = index
return entity_vocab
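# Each line of the entity vocab file is expected to look like "<entity title>\t<count>";
# the count field is discarded and the line index becomes the entity id.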
if __name__ == "__main__":
_a : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--checkpoint_path', type=str, help='Path to a pytorch_model.bin file.')
parser.add_argument(
'--metadata_path', default=None, type=str, help='Path to a metadata.json file, defining the configuration.'
)
parser.add_argument(
'--entity_vocab_path',
default=None,
type=str,
help='Path to an entity_vocab.tsv file, containing the entity vocabulary.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to where to dump the output PyTorch model.'
)
parser.add_argument(
'--model_size', default='base', type=str, choices=['base', 'large'], help='Size of the model to be converted.'
)
_a : List[str] = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
| 44 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowerCamelCase =logging.get_logger(__name__)
_lowerCamelCase ={
"""camembert-base""": """https://huggingface.co/camembert-base/resolve/main/config.json""",
"""umberto-commoncrawl-cased-v1""": (
"""https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json"""
),
"""umberto-wikipedia-uncased-v1""": (
"""https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json"""
),
}
class CamembertConfig(PretrainedConfig):
    model_type = "camembert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class CamembertOnnxConfig(OnnxConfig):
    @property
    def inputs(self):
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
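# Usage sketch (illustrative): the exported axes depend on the task.
#
#   onnx_config = CamembertOnnxConfig(CamembertConfig(), task="multiple-choice")
#   onnx_config.inputs  # OrderedDict with a "choice" axis at position 1 for both inputs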
| 287 | 0 |
"""simple docstring"""
import math
import unittest
def is_prime(number: int) -> bool:
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must be an int and positive"
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 , int(math.sqrt(lowerCAmelCase__ ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
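# Rationale for the loop above: every prime p > 3 satisfies p % 6 in (1, 5), so after
# excluding multiples of 2 and 3 it suffices to test divisors of the form 6k - 1 and
# 6k + 1 (i.e. i and i + 2 for i = 5, 11, 17, ...) up to sqrt(number).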
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
    def test_primes(self):
self.assertTrue(is_prime(2 ) )
self.assertTrue(is_prime(3 ) )
self.assertTrue(is_prime(5 ) )
self.assertTrue(is_prime(7 ) )
self.assertTrue(is_prime(11 ) )
self.assertTrue(is_prime(13 ) )
self.assertTrue(is_prime(17 ) )
self.assertTrue(is_prime(19 ) )
self.assertTrue(is_prime(23 ) )
self.assertTrue(is_prime(29 ) )
    def test_not_primes(self):
        with self.assertRaises(AssertionError):
is_prime(-19 )
self.assertFalse(
is_prime(0 ) , '''Zero doesn\'t have any positive factors, primes must have exactly two.''' , )
self.assertFalse(
is_prime(1 ) , '''One only has 1 positive factor, primes must have exactly two.''' , )
self.assertFalse(is_prime(2 * 2 ) )
self.assertFalse(is_prime(2 * 3 ) )
self.assertFalse(is_prime(3 * 3 ) )
self.assertFalse(is_prime(3 * 5 ) )
self.assertFalse(is_prime(3 * 5 * 7 ) )
if __name__ == "__main__":
unittest.main()
| 45 |
_lowerCamelCase ={
"joule": 1.0,
"kilojoule": 1_0_0_0,
"megajoule": 1_0_0_0_0_0_0,
"gigajoule": 1_0_0_0_0_0_0_0_0_0,
"wattsecond": 1.0,
"watthour": 3_6_0_0,
"kilowatthour": 3_6_0_0_0_0_0,
"newtonmeter": 1.0,
"calorie_nutr": 4_1_8_6.8,
"kilocalorie_nutr": 4_1_8_6_8_0_0.0_0,
"electronvolt": 1.6_0_2_1_7_6_6_3_4E-1_9,
"britishthermalunit_it": 1_0_5_5.0_5_5_8_5,
"footpound": 1.355818,
}
def energy_conversion(from_type: str, to_type: str, value: float) -> float:
    if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
        msg = (
            f"Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"
            f"Valid values are: {', '.join(ENERGY_CONVERSION)}"
        )
        raise ValueError(msg)
    return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]
if __name__ == "__main__":
import doctest
doctest.testmod()
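# Worked example: converting 1 kilowatthour to joule multiplies by 3_600_000 / 1.0,
# so energy_conversion("kilowatthour", "joule", 1.0) == 3_600_000.0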
| 287 | 0 |
"""simple docstring"""
from __future__ import annotations
from typing import Generic, TypeVar
T = TypeVar("T")


class DisjointSetTreeNode(Generic[T]):
    def __init__(self, data: T) -> None:
        self.data = data
        self.parent = self
        self.rank = 0


class DisjointSetTree(Generic[T]):
    def __init__(self) -> None:
        # map from node name to the node object
        self.map: dict[T, DisjointSetTreeNode[T]] = {}

    def make_set(self, data: T) -> None:
        # create a new set with x as its member
        self.map[data] = DisjointSetTreeNode(data)

    def find_set(self, data: T) -> DisjointSetTreeNode[T]:
        # find the set x belongs to (with path-compression)
        elem_ref = self.map[data]
        if elem_ref != elem_ref.parent:
            elem_ref.parent = self.find_set(elem_ref.parent.data)
        return elem_ref.parent

    def link(self, node1: DisjointSetTreeNode[T], node2: DisjointSetTreeNode[T]) -> None:
        # helper function for union operation: attach the lower-rank tree to the other
        if node1.rank > node2.rank:
            node2.parent = node1
        else:
            node1.parent = node2
            if node1.rank == node2.rank:
                node2.rank += 1

    def union(self, data1: T, data2: T) -> None:
        # merge 2 disjoint sets
        self.link(self.find_set(data1), self.find_set(data2))


class GraphUndirectedWeighted(Generic[T]):
    def __init__(self) -> None:
        # connections: map from the node to the neighbouring nodes (with weights)
        self.connections: dict[T, dict[T, int]] = {}

    def add_node(self, node: T) -> None:
        # add a node ONLY if its not present in the graph
        if node not in self.connections:
            self.connections[node] = {}

    def add_edge(self, node1: T, node2: T, weight: int) -> None:
        # add an edge with the given weight
        self.add_node(node1)
        self.add_node(node2)
        self.connections[node1][node2] = weight
        self.connections[node2][node1] = weight

    def kruskal(self) -> GraphUndirectedWeighted[T]:
        # collect every undirected edge exactly once, then sort by weight
        edges = []
        seen = set()
        for start in self.connections:
            for end in self.connections[start]:
                if (start, end) not in seen:
                    seen.add((end, start))
                    edges.append((start, end, self.connections[start][end]))
        edges.sort(key=lambda x: x[2])

        # creating the disjoint set
        disjoint_set = DisjointSetTree[T]()
        for node in self.connections:
            disjoint_set.make_set(node)

        # MST generation: greedily take the lightest edge joining two different sets
        num_edges = 0
        index = 0
        graph = GraphUndirectedWeighted[T]()
        while num_edges < len(self.connections) - 1:
            u, v, w = edges[index]
            index += 1
            parent_u = disjoint_set.find_set(u)
            parent_v = disjoint_set.find_set(v)
            if parent_u != parent_v:
                num_edges += 1
                graph.add_edge(u, v, w)
                disjoint_set.union(u, v)
        return graph
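# Usage sketch: build a weighted triangle and extract its minimum spanning tree.
#
#   g = GraphUndirectedWeighted[int]()
#   g.add_edge(1, 2, 1)
#   g.add_edge(2, 3, 2)
#   g.add_edge(1, 3, 10)
#   mst = g.kruskal()  # keeps the weight-1 and weight-2 edges, drops the weight-10 edge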
| 46 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class AutomaticSpeechRecognition(TaskTemplate):
    task: str = field(default="automatic-speech-recognition", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"audio": Audio()})
    label_schema: ClassVar[Features] = Features({"transcription": Value("string")})
    audio_column: str = "audio"
    transcription_column: str = "transcription"
    def align_with_features(self, features):
if self.audio_column not in features:
raise ValueError(F'''Column {self.audio_column} is not present in features.''' )
        if not isinstance(features[self.audio_column], Audio):
raise ValueError(F'''Column {self.audio_column} is not an Audio type.''' )
        task_template = copy.deepcopy(self)
        input_schema = self.input_schema.copy()
        input_schema["audio"] = features[self.audio_column]
        task_template.__dict__["input_schema"] = input_schema
return task_template
@property
    def column_mapping(self):
return {self.audio_column: "audio", self.transcription_column: "transcription"}
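# `align_with_features` swaps the generic `Audio()` input schema for the dataset's own
# audio feature (e.g. one carrying a specific sampling rate) so that casting the
# `audio_column` matches the data on disk.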
| 287 | 0 |
'''simple docstring'''
from typing import List
import numpy as np
def _number_of_shards_in_gen_kwargs(gen_kwargs: dict) -> int:
    # Lists in gen_kwargs are the data sources; they must all have the same length.
    lists_lengths = {key: len(value) for key, value in gen_kwargs.items() if isinstance(value, list)}
    if len(set(lists_lengths.values())) > 1:
        raise RuntimeError(
            (
                'Sharding is ambiguous for this dataset: '
                + 'we found several data sources lists of different lengths, and we don\'t know over which list we should parallelize:\n'
                + '\n'.join(f"\t- key {key} has length {length}" for key, length in lists_lengths.items())
                + '\nTo fix this, check the \'gen_kwargs\' and make sure to use lists only for data sources, '
                + 'and use tuples otherwise. In the end there should only be one single list, or several lists with the same length.'
            )
        )
    max_length = max(lists_lengths.values(), default=0)
    return max(1, max_length)


def _distribute_shards(num_shards: int, max_num_jobs: int) -> List[range]:
    # Split `num_shards` shard indices into at most `max_num_jobs` contiguous groups.
    shards_indices_per_group = []
    for group_idx in range(max_num_jobs):
        num_shards_to_add = num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs))
        if num_shards_to_add == 0:
            break
        start = shards_indices_per_group[-1].stop if shards_indices_per_group else 0
        shard_indices = range(start, start + num_shards_to_add)
        shards_indices_per_group.append(shard_indices)
    return shards_indices_per_group


def _split_gen_kwargs(gen_kwargs: dict, max_num_jobs: int) -> List[dict]:
    # Produce one gen_kwargs dict per group, slicing only the list-valued entries.
    num_shards = _number_of_shards_in_gen_kwargs(gen_kwargs)
    if num_shards == 1:
        return [dict(gen_kwargs)]
    else:
        shard_indices_per_group = _distribute_shards(num_shards=num_shards, max_num_jobs=max_num_jobs)
        return [
            {
                key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]]
                if isinstance(value, list)
                else value
                for key, value in gen_kwargs.items()
            }
            for group_idx in range(len(shard_indices_per_group))
        ]


def _merge_gen_kwargs(gen_kwargs_list: List[dict]) -> dict:
    return {
        key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]]
        if isinstance(gen_kwargs_list[0][key], list)
        else gen_kwargs_list[0][key]
        for key in gen_kwargs_list[0]
    }


def _shuffle_gen_kwargs(rng: np.random.Generator, gen_kwargs: dict) -> dict:
    # Lists of the same size are shuffled with the same permutation so that aligned
    # data sources (e.g. files and their metadata) stay aligned.
    list_sizes = {len(value) for value in gen_kwargs.values() if isinstance(value, list)}
    indices_per_size = {}
    for size in list_sizes:
        indices_per_size[size] = list(range(size))
        rng.shuffle(indices_per_size[size])
    # Now let's copy the gen_kwargs and shuffle the lists based on their sizes
    shuffled_kwargs = dict(gen_kwargs)
    for key, value in shuffled_kwargs.items():
        if isinstance(value, list):
            shuffled_kwargs[key] = [value[i] for i in indices_per_size[len(value)]]
    return shuffled_kwargs
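# Worked example: _distribute_shards(num_shards=10, max_num_jobs=3) returns
# [range(0, 4), range(4, 7), range(7, 10)] -- the remainder shards go to the
# earliest groups, and empty groups are never emitted.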
| 47 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCamelCase =logging.get_logger(__name__)
_lowerCamelCase ={
"""BridgeTower/bridgetower-base""": """https://huggingface.co/BridgeTower/bridgetower-base/blob/main/config.json""",
"""BridgeTower/bridgetower-base-itm-mlm""": (
"""https://huggingface.co/BridgeTower/bridgetower-base-itm-mlm/blob/main/config.json"""
),
}
class BridgeTowerVisionConfig(PretrainedConfig):
    model_type = "bridgetower_vision_model"
def __init__( self , __magic_name__=7_6_8 , __magic_name__=1_2 , __magic_name__=3 , __magic_name__=1_6 , __magic_name__=2_8_8 , __magic_name__=1 , __magic_name__=1e-05 , __magic_name__=False , __magic_name__=True , __magic_name__=False , **__magic_name__ , ):
super().__init__(**__magic_name__ )
lowerCamelCase : Dict = hidden_size
lowerCamelCase : str = num_hidden_layers
lowerCamelCase : Optional[int] = num_channels
lowerCamelCase : List[str] = patch_size
lowerCamelCase : Tuple = image_size
lowerCamelCase : Any = initializer_factor
lowerCamelCase : Tuple = layer_norm_eps
lowerCamelCase : Tuple = stop_gradient
lowerCamelCase : Optional[int] = share_layernorm
lowerCamelCase : str = remove_last_layer
@classmethod
def UpperCamelCase__ ( cls , __magic_name__ , **__magic_name__ ):
lowerCamelCase , lowerCamelCase : int = cls.get_config_dict(__magic_name__ , **__magic_name__ )
if config_dict.get("""model_type""" ) == "bridgetower":
lowerCamelCase : str = config_dict["""text_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(__magic_name__ , **__magic_name__ )
class BridgeTowerTextConfig(PretrainedConfig):
    model_type = "bridgetower_text_model"
def __init__( self , __magic_name__=5_0_2_6_5 , __magic_name__=7_6_8 , __magic_name__=1_2 , __magic_name__=1_2 , __magic_name__=1 , __magic_name__=3_0_7_2 , __magic_name__="gelu" , __magic_name__=0.1 , __magic_name__=0.1 , __magic_name__=5_1_4 , __magic_name__=1 , __magic_name__=1e-05 , __magic_name__=1 , __magic_name__=0 , __magic_name__=2 , __magic_name__="absolute" , __magic_name__=True , **__magic_name__ , ):
super().__init__(**__magic_name__ )
lowerCamelCase : int = vocab_size
lowerCamelCase : int = hidden_size
lowerCamelCase : Any = num_hidden_layers
lowerCamelCase : Union[str, Any] = num_attention_heads
lowerCamelCase : Tuple = hidden_act
lowerCamelCase : Optional[int] = initializer_factor
lowerCamelCase : Any = intermediate_size
lowerCamelCase : List[str] = hidden_dropout_prob
lowerCamelCase : Dict = attention_probs_dropout_prob
lowerCamelCase : str = max_position_embeddings
lowerCamelCase : Union[str, Any] = type_vocab_size
lowerCamelCase : Optional[int] = layer_norm_eps
lowerCamelCase : Optional[int] = position_embedding_type
lowerCamelCase : List[str] = use_cache
lowerCamelCase : List[str] = pad_token_id
lowerCamelCase : List[str] = bos_token_id
lowerCamelCase : Optional[int] = eos_token_id
@classmethod
def UpperCamelCase__ ( cls , __magic_name__ , **__magic_name__ ):
lowerCamelCase , lowerCamelCase : int = cls.get_config_dict(__magic_name__ , **__magic_name__ )
if config_dict.get("""model_type""" ) == "bridgetower":
lowerCamelCase : Optional[int] = config_dict["""text_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(__magic_name__ , **__magic_name__ )
class BridgeTowerConfig(PretrainedConfig):
    model_type = "bridgetower"
def __init__( self , __magic_name__=True , __magic_name__="gelu" , __magic_name__=7_6_8 , __magic_name__=1 , __magic_name__=1e-05 , __magic_name__=False , __magic_name__="add" , __magic_name__=1_2 , __magic_name__=6 , __magic_name__=False , __magic_name__=False , __magic_name__=None , __magic_name__=None , **__magic_name__ , ):
# TODO: remove this once the Hub files are updated.
lowerCamelCase : int = kwargs.pop("""text_config_dict""" , __magic_name__ )
lowerCamelCase : str = kwargs.pop("""vision_config_dict""" , __magic_name__ )
super().__init__(**__magic_name__ )
lowerCamelCase : str = share_cross_modal_transformer_layers
lowerCamelCase : Union[str, Any] = hidden_act
lowerCamelCase : str = hidden_size
lowerCamelCase : Tuple = initializer_factor
lowerCamelCase : List[str] = layer_norm_eps
lowerCamelCase : int = share_link_tower_layers
lowerCamelCase : List[Any] = link_tower_type
lowerCamelCase : Tuple = num_attention_heads
lowerCamelCase : int = num_hidden_layers
lowerCamelCase : Union[str, Any] = tie_word_embeddings
lowerCamelCase : Tuple = init_layernorm_from_vision_encoder
if text_config is None:
lowerCamelCase : Any = {}
logger.info("""`text_config` is `None`. Initializing the `BridgeTowerTextConfig` with default values.""" )
if vision_config is None:
lowerCamelCase : int = {}
logger.info("""`vision_config` is `None`. Initializing the `BridgeTowerVisionConfig` with default values.""" )
lowerCamelCase : Any = BridgeTowerTextConfig(**__magic_name__ )
lowerCamelCase : Optional[Any] = BridgeTowerVisionConfig(**__magic_name__ )
@classmethod
def UpperCamelCase__ ( cls , __magic_name__ , __magic_name__ , **__magic_name__ ):
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **__magic_name__ )
def UpperCamelCase__ ( self ):
lowerCamelCase : str = copy.deepcopy(self.__dict__ )
lowerCamelCase : int = self.text_config.to_dict()
lowerCamelCase : Dict = self.vision_config.to_dict()
lowerCamelCase : List[str] = self.__class__.model_type
return output
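# A minimal standalone sketch of the composite-config pattern used above:
# deep-copy the instance dict in to_dict(), then replace nested config objects
# with plain dicts so the result is JSON-serializable. Class and field names
# here are illustrative, not the real BridgeTower API.
import copy


class SubConfig:
    def __init__(self, hidden_size=768):
        self.hidden_size = hidden_size

    def to_dict(self):
        return copy.deepcopy(self.__dict__)


class CompositeConfig:
    model_type = "composite"

    def __init__(self, text_config=None, vision_config=None):
        # Fall back to default sub-configs when none are provided.
        self.text_config = SubConfig(**(text_config or {}))
        self.vision_config = SubConfig(**(vision_config or {}))

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.model_type
        return output


config = CompositeConfig(text_config={"hidden_size": 512})
assert config.to_dict()["text_config"]["hidden_size"] == 512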
| 287 | 0 |
import multiprocessing
from typing import TYPE_CHECKING, Optional, Union
from .. import Dataset, Features, config
from ..formatting import query_table
from ..packaged_modules.sql.sql import Sql
from ..utils import logging
from .abc import AbstractDatasetInputStream
if TYPE_CHECKING:
    import sqlite3
import sqlalchemy
class SqlDatasetReader(AbstractDatasetInputStream):
    def __init__(self, sql, con, features=None, cache_dir=None, keep_in_memory=False, **kwargs):
        super().__init__(features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, **kwargs)
        self.builder = Sql(
            cache_dir=cache_dir, features=features, sql=sql, con=con, **kwargs
        )

    def read(self):
        download_config = None
        download_mode = None
        verification_mode = None
        base_path = None
        self.builder.download_and_prepare(
            download_config=download_config,
            download_mode=download_mode,
            verification_mode=verification_mode,
            base_path=base_path,
        )
        # Build dataset for splits
        dataset = self.builder.as_dataset(
            split="train", verification_mode=verification_mode, in_memory=self.keep_in_memory
        )
        return dataset


class SqlDatasetWriter:
    def __init__(self, dataset, name, con, batch_size=None, num_proc=None, **to_sql_kwargs):
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f"num_proc {num_proc} must be an integer > 0.")
        self.dataset = dataset
        self.name = name
        self.con = con
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.to_sql_kwargs = to_sql_kwargs

    def write(self):
        _ = self.to_sql_kwargs.pop("sql", None)
        _ = self.to_sql_kwargs.pop("con", None)
        index = self.to_sql_kwargs.pop("index", False)
        written = self._write(index=index, **self.to_sql_kwargs)
        return written

    def _batch_sql(self, args):
        offset, index, to_sql_kwargs = args
        to_sql_kwargs = {**to_sql_kwargs, "if_exists": "append"} if offset > 0 else to_sql_kwargs
        batch = query_table(
            table=self.dataset.data,
            key=slice(offset, offset + self.batch_size),
            indices=self.dataset._indices,
        )
        df = batch.to_pandas()
        num_rows = df.to_sql(self.name, self.con, index=index, **to_sql_kwargs)
        return num_rows or len(df)

    def _write(self, index, **to_sql_kwargs):
        written = 0
        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0, len(self.dataset), self.batch_size),
                unit="ba",
                disable=not logging.is_progress_bar_enabled(),
                desc="Creating SQL from Arrow format",
            ):
                written += self._batch_sql((offset, index, to_sql_kwargs))
        else:
            num_rows, batch_size = len(self.dataset), self.batch_size
            with multiprocessing.Pool(self.num_proc) as pool:
                for num_rows in logging.tqdm(
                    pool.imap(
                        self._batch_sql,
                        [(offset, index, to_sql_kwargs) for offset in range(0, num_rows, batch_size)],
                    ),
                    total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size,
                    unit="ba",
                    disable=not logging.is_progress_bar_enabled(),
                    desc="Creating SQL from Arrow format",
                ):
                    written += num_rows
        return written
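# The writer above streams the dataset to SQL in fixed-size batches, switching
# to if_exists="append" after the first batch so later chunks extend the table.
# A minimal sketch of that batching idea with plain pandas and sqlite3 (table
# name and sizes are made up for illustration):
import sqlite3

import pandas as pd

df = pd.DataFrame({"id": range(10), "value": [i * i for i in range(10)]})
con = sqlite3.connect(":memory:")
batch_size = 4

written = 0
for offset in range(0, len(df), batch_size):
    batch = df.iloc[offset : offset + batch_size]
    # The first batch creates the table; later batches must append to it.
    if_exists = "append" if offset > 0 else "fail"
    written += batch.to_sql("squares", con, index=False, if_exists=if_exists) or len(batch)

assert written == len(df)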
| 48 |
def solution(limit: int = 1_000_000) -> int:
    # Sieve of Eratosthenes over the odd numbers, plus 2.
    primes = set(range(3, limit, 2))
    primes.add(2)
    for p in range(3, limit, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, limit, p)))
    # Euler's totient via the product formula, applied sieve-style.
    phi = [float(n) for n in range(limit + 1)]
    for p in primes:
        for n in range(p, limit + 1, p):
            phi[n] *= 1 - 1 / p
    return int(sum(phi[2:]))
if __name__ == "__main__":
print(f'''{solution() = }''')
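# Why the sieve above works: phi(n) = n * prod(1 - 1/p) over the distinct
# primes p dividing n, so starting phi[n] at n and multiplying in (1 - 1/p)
# along every prime stride reproduces that product. A quick cross-check with a
# direct gcd-based count (helper written for illustration):
from math import gcd


def phi_naive(n: int) -> int:
    return sum(1 for k in range(1, n + 1) if gcd(n, k) == 1)


# phi(12) = 12 * (1 - 1/2) * (1 - 1/3) = 4
assert phi_naive(12) == 4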
| 287 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'''configuration_xlm''': ['''XLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XLMConfig''', '''XLMOnnxConfig'''],
'''tokenization_xlm''': ['''XLMTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlm"] = [
'''XLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XLMForMultipleChoice''',
'''XLMForQuestionAnswering''',
'''XLMForQuestionAnsweringSimple''',
'''XLMForSequenceClassification''',
'''XLMForTokenClassification''',
'''XLMModel''',
'''XLMPreTrainedModel''',
'''XLMWithLMHeadModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xlm"] = [
'''TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXLMForMultipleChoice''',
'''TFXLMForQuestionAnsweringSimple''',
'''TFXLMForSequenceClassification''',
'''TFXLMForTokenClassification''',
'''TFXLMMainLayer''',
'''TFXLMModel''',
'''TFXLMPreTrainedModel''',
'''TFXLMWithLMHeadModel''',
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
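# The module above defers the heavy torch/TF imports via _LazyModule: nothing
# in modeling_xlm is imported until an attribute is first accessed. A minimal
# sketch of the same idea using module-level __getattr__ (PEP 562), written to
# live in a package's __init__.py; it is independent of the transformers helper.
import importlib

_lazy_import_structure = {
    "XLMConfig": "configuration_xlm",
    "XLMModel": "modeling_xlm",
}


def __getattr__(name):
    if name in _lazy_import_structure:
        module = importlib.import_module(f".{_lazy_import_structure[name]}", __name__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")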
| 49 |
from __future__ import annotations
import collections
import pprint
from pathlib import Path
def signature(word: str) -> str:
    return "".join(sorted(word))


def anagram(my_word: str) -> list:
    return word_by_signature[signature(my_word)]


data = Path(__file__).parent.joinpath("words.txt").read_text(encoding="utf-8")
word_list = sorted({word.strip().lower() for word in data.splitlines()})
word_by_signature = collections.defaultdict(list)
for word in word_list:
    word_by_signature[signature(word)].append(word)

if __name__ == "__main__":
    all_anagrams = {word: anagram(word) for word in word_list if len(anagram(word)) > 1}
with open("""anagrams.txt""", """w""") as file:
file.write("""all_anagrams = \n """)
file.write(pprint.pformat(all_anagrams))
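# The key idea above: two words are anagrams exactly when their sorted-letter
# signatures match, so a single pass groups the whole word list. A quick
# illustration that needs no words.txt file:
import collections

words = ["listen", "silent", "enlist", "google"]
groups = collections.defaultdict(list)
for w in words:
    groups["".join(sorted(w))].append(w)

assert groups["eilnst"] == ["listen", "silent", "enlist"]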
| 287 | 0 |
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow
from ..bert.test_tokenization_bert import BertTokenizationTest
@require_tokenizers
class DistilBertTokenizationTest(BertTokenizationTest):
    tokenizer_class = DistilBertTokenizer
    rust_tokenizer_class = DistilBertTokenizerFast
    test_rust_tokenizer = True

    @slow
    def test_sequence_builders(self):
        tokenizer = DistilBertTokenizer.from_pretrained("distilbert-base-uncased")
        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_a = tokenizer.encode("multi-sequence build", add_special_tokens=False)
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_a)
assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
tokenizer.sep_token_id
]
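# The assertions above encode the BERT-style special-token layout: a single
# sequence becomes [CLS] A [SEP], a pair becomes [CLS] A [SEP] B [SEP].
# Sketched directly with made-up token ids:
def build_inputs(cls_id, sep_id, text, text_pair=None):
    ids = [cls_id] + text + [sep_id]
    if text_pair is not None:
        ids += text_pair + [sep_id]
    return ids


assert build_inputs(101, 102, [7, 8]) == [101, 7, 8, 102]
assert build_inputs(101, 102, [7, 8], [9]) == [101, 7, 8, 102, 9, 102]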
| 50 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
_lowerCamelCase =logging.get_logger(__name__)
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : Union[str, Any] = ["""pixel_values"""]
def __init__( self , __magic_name__ = True , __magic_name__ = None , __magic_name__ = None , __magic_name__ = PILImageResampling.BILINEAR , __magic_name__ = True , __magic_name__ = 1 / 2_5_5 , __magic_name__ = True , __magic_name__ = None , __magic_name__ = None , **__magic_name__ , ):
super().__init__(**__magic_name__ )
lowerCamelCase : Dict = size if size is not None else {"""shortest_edge""": 3_8_4}
lowerCamelCase : Tuple = get_size_dict(__magic_name__ , default_to_square=__magic_name__ )
lowerCamelCase : Dict = do_resize
lowerCamelCase : List[Any] = size
# Default value set here for backwards compatibility where the value in config is None
lowerCamelCase : Any = crop_pct if crop_pct is not None else 2_2_4 / 2_5_6
lowerCamelCase : Union[str, Any] = resample
lowerCamelCase : str = do_rescale
lowerCamelCase : Union[str, Any] = rescale_factor
lowerCamelCase : Tuple = do_normalize
lowerCamelCase : Optional[Any] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
lowerCamelCase : int = image_std if image_std is not None else IMAGENET_STANDARD_STD
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ = PILImageResampling.BICUBIC , __magic_name__ = None , **__magic_name__ , ):
lowerCamelCase : Union[str, Any] = get_size_dict(__magic_name__ , default_to_square=__magic_name__ )
if "shortest_edge" not in size:
raise ValueError(F'''Size dictionary must contain \'shortest_edge\' key. Got {size.keys()}''' )
lowerCamelCase : str = size["""shortest_edge"""]
if shortest_edge < 3_8_4:
# maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
lowerCamelCase : List[str] = int(shortest_edge / crop_pct )
lowerCamelCase : Optional[Any] = get_resize_output_image_size(__magic_name__ , size=__magic_name__ , default_to_square=__magic_name__ )
lowerCamelCase : Optional[int] = resize(image=__magic_name__ , size=__magic_name__ , resample=__magic_name__ , data_format=__magic_name__ , **__magic_name__ )
# then crop to (shortest_edge, shortest_edge)
return center_crop(image=__magic_name__ , size=(shortest_edge, shortest_edge) , data_format=__magic_name__ , **__magic_name__ )
else:
# warping (no cropping) when evaluated at 384 or larger
return resize(
__magic_name__ , size=(shortest_edge, shortest_edge) , resample=__magic_name__ , data_format=__magic_name__ , **__magic_name__ )
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ = None , **__magic_name__ , ):
return rescale(__magic_name__ , scale=__magic_name__ , data_format=__magic_name__ , **__magic_name__ )
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ = None , **__magic_name__ , ):
return normalize(__magic_name__ , mean=__magic_name__ , std=__magic_name__ , data_format=__magic_name__ , **__magic_name__ )
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ = None , __magic_name__ = None , __magic_name__ = None , __magic_name__ = None , __magic_name__ = None , __magic_name__ = None , __magic_name__ = None , __magic_name__ = None , __magic_name__ = None , __magic_name__ = None , __magic_name__ = ChannelDimension.FIRST , **__magic_name__ , ):
lowerCamelCase : str = do_resize if do_resize is not None else self.do_resize
lowerCamelCase : Optional[Any] = crop_pct if crop_pct is not None else self.crop_pct
lowerCamelCase : Optional[int] = resample if resample is not None else self.resample
lowerCamelCase : Optional[int] = do_rescale if do_rescale is not None else self.do_rescale
lowerCamelCase : List[str] = rescale_factor if rescale_factor is not None else self.rescale_factor
lowerCamelCase : Tuple = do_normalize if do_normalize is not None else self.do_normalize
lowerCamelCase : Optional[Any] = image_mean if image_mean is not None else self.image_mean
lowerCamelCase : Tuple = image_std if image_std is not None else self.image_std
lowerCamelCase : Dict = size if size is not None else self.size
lowerCamelCase : str = get_size_dict(__magic_name__ , default_to_square=__magic_name__ )
lowerCamelCase : List[str] = make_list_of_images(__magic_name__ )
if not valid_images(__magic_name__ ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
        if do_resize and (size is None or resample is None):
raise ValueError("""Size and resample must be specified if do_resize is True.""" )
if do_resize and size["shortest_edge"] < 3_8_4 and crop_pct is None:
raise ValueError("""crop_pct must be specified if size < 384.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# All transformations expect numpy arrays.
lowerCamelCase : Optional[Any] = [to_numpy_array(__magic_name__ ) for image in images]
if do_resize:
lowerCamelCase : List[Any] = [self.resize(image=__magic_name__ , size=__magic_name__ , crop_pct=__magic_name__ , resample=__magic_name__ ) for image in images]
if do_rescale:
lowerCamelCase : Union[str, Any] = [self.rescale(image=__magic_name__ , scale=__magic_name__ ) for image in images]
if do_normalize:
lowerCamelCase : List[Any] = [self.normalize(image=__magic_name__ , mean=__magic_name__ , std=__magic_name__ ) for image in images]
lowerCamelCase : Optional[int] = [to_channel_dimension_format(__magic_name__ , __magic_name__ ) for image in images]
lowerCamelCase : List[str] = {"""pixel_values""": images}
return BatchFeature(data=__magic_name__ , tensor_type=__magic_name__ )
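# The arithmetic behind the crop path above: for a requested shortest edge
# below 384, the image is first resized so its shortest edge becomes
# shortest_edge / crop_pct, then center-cropped back to shortest_edge
# (the classic eval-time resize-then-crop). With the defaults above:
crop_pct = 224 / 256  # 0.875
shortest_edge = 224  # requested size below 384, so the crop path applies

resize_edge = int(shortest_edge / crop_pct)
assert resize_edge == 256
# 1) resize so the shortest edge becomes 256 (aspect ratio preserved)
# 2) center-crop a 224 x 224 window from the result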
| 287 | 0 |
from pathlib import Path
import fire
def minify(src_dir: str, dest_dir: str, n: int) -> None:
    """Write the first n lines of each file in src_dir to dest_dir."""
    src_dir = Path(src_dir)
    dest_dir = Path(dest_dir)
    dest_dir.mkdir(exist_ok=True)
    for path in src_dir.iterdir():
        new = [x.rstrip() for x in list(path.open().readlines())][:n]
        dest_path = dest_dir.joinpath(path.name)
        print(dest_path)
        dest_path.open("w").write("\n".join(new))
if __name__ == "__main__":
fire.Fire(minify)
| 51 |
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPTaConfig
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
_lowerCamelCase ={
"""return_dict""": False,
"""output_hidden_states""": True,
"""output_attentions""": True,
"""torchscript""": True,
"""torch_dtype""": """float16""",
"""use_bfloat16""": True,
"""tf_legacy_loss""": True,
"""pruned_heads""": {"""a""": 1},
"""tie_word_embeddings""": False,
"""is_decoder""": True,
"""cross_attention_hidden_size""": 1_2_8,
"""add_cross_attention""": True,
"""tie_encoder_decoder""": True,
"""max_length""": 5_0,
"""min_length""": 3,
"""do_sample""": True,
"""early_stopping""": True,
"""num_beams""": 3,
"""num_beam_groups""": 3,
"""diversity_penalty""": 0.5,
"""temperature""": 2.0,
"""top_k""": 1_0,
"""top_p""": 0.7,
"""typical_p""": 0.2,
"""repetition_penalty""": 0.8,
"""length_penalty""": 0.8,
"""no_repeat_ngram_size""": 5,
"""encoder_no_repeat_ngram_size""": 5,
"""bad_words_ids""": [1, 2, 3],
"""num_return_sequences""": 3,
"""chunk_size_feed_forward""": 5,
"""output_scores""": True,
"""return_dict_in_generate""": True,
"""forced_bos_token_id""": 2,
"""forced_eos_token_id""": 3,
"""remove_invalid_values""": True,
"""architectures""": ["""BertModel"""],
"""finetuning_task""": """translation""",
"""id2label""": {0: """label"""},
"""label2id""": {"""label""": """0"""},
"""tokenizer_class""": """BertTokenizerFast""",
"""prefix""": """prefix""",
"""bos_token_id""": 6,
"""pad_token_id""": 7,
"""eos_token_id""": 8,
"""sep_token_id""": 9,
"""decoder_start_token_id""": 1_0,
"""exponential_decay_length_penalty""": (5, 1.01),
"""suppress_tokens""": [0, 1],
"""begin_suppress_tokens""": 2,
"""task_specific_params""": {"""translation""": """some_params"""},
"""problem_type""": """regression""",
}
@is_staging_test
class A__ ( unittest.TestCase):
@classmethod
def UpperCamelCase__ ( cls ):
lowerCamelCase : int = TOKEN
HfFolder.save_token(__magic_name__ )
@classmethod
def UpperCamelCase__ ( cls ):
try:
delete_repo(token=cls._token , repo_id="""test-config""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""valid_org/test-config-org""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""test-dynamic-config""" )
except HTTPError:
pass
def UpperCamelCase__ ( self ):
lowerCamelCase : List[Any] = BertConfig(
vocab_size=9_9 , hidden_size=3_2 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=3_7 )
config.push_to_hub("""test-config""" , use_auth_token=self._token )
lowerCamelCase : Any = BertConfig.from_pretrained(F'''{USER}/test-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__magic_name__ , getattr(__magic_name__ , __magic_name__ ) )
# Reset repo
delete_repo(token=self._token , repo_id="""test-config""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(__magic_name__ , repo_id="""test-config""" , push_to_hub=__magic_name__ , use_auth_token=self._token )
lowerCamelCase : Optional[Any] = BertConfig.from_pretrained(F'''{USER}/test-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__magic_name__ , getattr(__magic_name__ , __magic_name__ ) )
def UpperCamelCase__ ( self ):
lowerCamelCase : Dict = BertConfig(
vocab_size=9_9 , hidden_size=3_2 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=3_7 )
config.push_to_hub("""valid_org/test-config-org""" , use_auth_token=self._token )
lowerCamelCase : Optional[int] = BertConfig.from_pretrained("""valid_org/test-config-org""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__magic_name__ , getattr(__magic_name__ , __magic_name__ ) )
# Reset repo
delete_repo(token=self._token , repo_id="""valid_org/test-config-org""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
__magic_name__ , repo_id="""valid_org/test-config-org""" , push_to_hub=__magic_name__ , use_auth_token=self._token )
lowerCamelCase : List[str] = BertConfig.from_pretrained("""valid_org/test-config-org""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__magic_name__ , getattr(__magic_name__ , __magic_name__ ) )
def UpperCamelCase__ ( self ):
CustomConfig.register_for_auto_class()
lowerCamelCase : Optional[Any] = CustomConfig(attribute=4_2 )
config.push_to_hub("""test-dynamic-config""" , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(config.auto_map , {"""AutoConfig""": """custom_configuration.CustomConfig"""} )
lowerCamelCase : List[str] = AutoConfig.from_pretrained(F'''{USER}/test-dynamic-config''' , trust_remote_code=__magic_name__ )
# Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
self.assertEqual(new_config.__class__.__name__ , """CustomConfig""" )
self.assertEqual(new_config.attribute , 4_2 )
class A__ ( unittest.TestCase):
def UpperCamelCase__ ( self ):
lowerCamelCase : str = GPTaConfig()
# attempt to modify each of int/float/bool/str config records and verify they were updated
lowerCamelCase : Optional[int] = c.n_embd + 1 # int
lowerCamelCase : Optional[int] = c.resid_pdrop + 1.0 # float
lowerCamelCase : Tuple = not c.scale_attn_weights # bool
lowerCamelCase : Any = c.summary_type + """foo""" # str
c.update_from_string(
F'''n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}''' )
self.assertEqual(__magic_name__ , c.n_embd , """mismatch for key: n_embd""" )
self.assertEqual(__magic_name__ , c.resid_pdrop , """mismatch for key: resid_pdrop""" )
self.assertEqual(__magic_name__ , c.scale_attn_weights , """mismatch for key: scale_attn_weights""" )
self.assertEqual(__magic_name__ , c.summary_type , """mismatch for key: summary_type""" )
def UpperCamelCase__ ( self ):
lowerCamelCase : str = PretrainedConfig()
lowerCamelCase : int = [key for key in base_config.__dict__ if key not in config_common_kwargs]
        # If this part of the test fails, you have arguments to add in config_common_kwargs above.
self.assertListEqual(
__magic_name__ , ["""is_encoder_decoder""", """_name_or_path""", """_commit_hash""", """transformers_version"""] )
lowerCamelCase : List[str] = [key for key, value in config_common_kwargs.items() if value == getattr(__magic_name__ , __magic_name__ )]
if len(__magic_name__ ) > 0:
raise ValueError(
"""The following keys are set with the default values in"""
""" `test_configuration_common.config_common_kwargs` pick another value for them:"""
F''' {", ".join(__magic_name__ )}.''' )
def UpperCamelCase__ ( self ):
with self.assertRaises(__magic_name__ ):
# config is in subfolder, the following should not work without specifying the subfolder
lowerCamelCase : Dict = BertConfig.from_pretrained("""hf-internal-testing/tiny-random-bert-subfolder""" )
lowerCamelCase : str = BertConfig.from_pretrained("""hf-internal-testing/tiny-random-bert-subfolder""" , subfolder="""bert""" )
self.assertIsNotNone(__magic_name__ )
def UpperCamelCase__ ( self ):
# A mock response for an HTTP head request to emulate server down
lowerCamelCase : Dict = mock.Mock()
lowerCamelCase : Optional[int] = 5_0_0
lowerCamelCase : List[Any] = {}
lowerCamelCase : Tuple = HTTPError
lowerCamelCase : Union[str, Any] = {}
# Download this model to make sure it's in the cache.
lowerCamelCase : List[str] = BertConfig.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch("""requests.Session.request""" , return_value=__magic_name__ ) as mock_head:
lowerCamelCase : Any = BertConfig.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
        # This checks that we did call the fake head request
mock_head.assert_called()
def UpperCamelCase__ ( self ):
# This test is for deprecated behavior and can be removed in v5
lowerCamelCase : List[str] = BertConfig.from_pretrained(
"""https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json""" )
def UpperCamelCase__ ( self ):
lowerCamelCase : Optional[int] = AutoConfig.from_pretrained("""bert-base-cased""" )
lowerCamelCase : Optional[Any] = ["""config.4.0.0.json"""]
with tempfile.TemporaryDirectory() as tmp_dir:
configuration.save_pretrained(__magic_name__ )
lowerCamelCase : str = 2
json.dump(configuration.to_dict() , open(os.path.join(__magic_name__ , """config.4.0.0.json""" ) , """w""" ) )
# This should pick the new configuration file as the version of Transformers is > 4.0.0
lowerCamelCase : Union[str, Any] = AutoConfig.from_pretrained(__magic_name__ )
self.assertEqual(new_configuration.hidden_size , 2 )
# Will need to be adjusted if we reach v42 and this test is still here.
# Should pick the old configuration file as the version of Transformers is < 4.42.0
lowerCamelCase : Any = ["""config.42.0.0.json"""]
lowerCamelCase : Optional[Any] = 7_6_8
configuration.save_pretrained(__magic_name__ )
shutil.move(os.path.join(__magic_name__ , """config.4.0.0.json""" ) , os.path.join(__magic_name__ , """config.42.0.0.json""" ) )
lowerCamelCase : int = AutoConfig.from_pretrained(__magic_name__ )
self.assertEqual(new_configuration.hidden_size , 7_6_8 )
def UpperCamelCase__ ( self ):
# This repo has two configuration files, one for v4.0.0 and above with a different hidden size.
lowerCamelCase : str = """hf-internal-testing/test-two-configs"""
import transformers as new_transformers
lowerCamelCase : Tuple = """v4.0.0"""
lowerCamelCase , lowerCamelCase : Optional[int] = new_transformers.models.auto.AutoConfig.from_pretrained(
__magic_name__ , return_unused_kwargs=__magic_name__ )
self.assertEqual(new_configuration.hidden_size , 2 )
        # This checks `_configuration_file` is not kept in the kwargs by mistake.
self.assertDictEqual(__magic_name__ , {} )
# Testing an older version by monkey-patching the version in the module it's used.
import transformers as old_transformers
lowerCamelCase : Tuple = """v3.0.0"""
lowerCamelCase : Any = old_transformers.models.auto.AutoConfig.from_pretrained(__magic_name__ )
self.assertEqual(old_configuration.hidden_size , 7_6_8 )
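# The last two tests exercise version-gated configuration files: a repo may
# ship config.4.0.0.json next to config.json, and the loader picks the newest
# versioned file whose tag does not exceed the installed transformers version.
# A rough sketch of that selection logic (simplified and illustrative; the
# real resolver lives in transformers):
from packaging import version


def pick_config_file(available, current="4.1.0"):
    best, best_version = "config.json", version.parse("0")
    for name in available:
        parts = name.split(".")
        if len(parts) <= 2:  # plain config.json carries no version tag
            continue
        tag = version.parse(".".join(parts[1:-1]))
        if tag <= version.parse(current) and tag > best_version:
            best, best_version = name, tag
    return best


assert pick_config_file(["config.json", "config.4.0.0.json", "config.42.0.0.json"]) == "config.4.0.0.json"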
| 287 | 0 |
from __future__ import annotations
import unittest
from transformers import EsmConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers.models.esm.modeling_tf_esm import (
TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
TFEsmModel,
)
class A__ :
def __init__( self , A_ , ):
'''simple docstring'''
UpperCamelCase : int = parent
UpperCamelCase : Union[str, Any] = 13
UpperCamelCase : Any = 7
UpperCamelCase : Any = True
UpperCamelCase : List[str] = True
UpperCamelCase : Optional[Any] = True
UpperCamelCase : List[Any] = 99
UpperCamelCase : List[Any] = 32
UpperCamelCase : Optional[int] = 2
UpperCamelCase : List[Any] = 4
UpperCamelCase : Optional[Any] = 37
UpperCamelCase : Optional[Any] = "gelu"
UpperCamelCase : int = 0.1
UpperCamelCase : Optional[int] = 0.1
UpperCamelCase : List[str] = 512
UpperCamelCase : Union[str, Any] = 16
UpperCamelCase : Tuple = 2
UpperCamelCase : List[Any] = 0.02
UpperCamelCase : Optional[int] = 3
UpperCamelCase : List[Any] = 4
UpperCamelCase : Optional[int] = None
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCamelCase : List[str] = None
if self.use_input_mask:
UpperCamelCase : int = random_attention_mask([self.batch_size, self.seq_length] )
UpperCamelCase : Optional[Any] = None
UpperCamelCase : Any = None
UpperCamelCase : Optional[Any] = None
if self.use_labels:
UpperCamelCase : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCamelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCamelCase : str = ids_tensor([self.batch_size] , self.num_choices )
UpperCamelCase : Dict = EsmConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , pad_token_id=1 , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def __UpperCamelCase( self ):
'''simple docstring'''
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
return (
config,
input_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ , A_ ):
'''simple docstring'''
UpperCamelCase : Optional[int] = TFEsmModel(config=A_ )
UpperCamelCase : Any = {"input_ids": input_ids, "attention_mask": input_mask}
UpperCamelCase : int = model(A_ )
UpperCamelCase : List[Any] = [input_ids, input_mask]
UpperCamelCase : int = model(A_ )
UpperCamelCase : Tuple = model(A_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ):
'''simple docstring'''
UpperCamelCase : Tuple = True
UpperCamelCase : str = TFEsmModel(config=A_ )
UpperCamelCase : Optional[Any] = {
"input_ids": input_ids,
"attention_mask": input_mask,
"encoder_hidden_states": encoder_hidden_states,
"encoder_attention_mask": encoder_attention_mask,
}
UpperCamelCase : Dict = model(A_ )
UpperCamelCase : Union[str, Any] = [input_ids, input_mask]
UpperCamelCase : List[Any] = model(A_ , encoder_hidden_states=A_ )
# Also check the case where encoder outputs are not passed
UpperCamelCase : List[str] = model(A_ , attention_mask=A_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ , A_ ):
'''simple docstring'''
UpperCamelCase : List[Any] = TFEsmForMaskedLM(config=A_ )
UpperCamelCase : Tuple = model([input_ids, input_mask] )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ , A_ ):
'''simple docstring'''
UpperCamelCase : int = self.num_labels
UpperCamelCase : Dict = TFEsmForTokenClassification(config=A_ )
UpperCamelCase : List[str] = {"input_ids": input_ids, "attention_mask": input_mask}
UpperCamelCase : Dict = model(A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __UpperCamelCase( self ):
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_tf
class A__ ( __snake_case , __snake_case , unittest.TestCase ):
_UpperCAmelCase :Any = (
(
TFEsmModel,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
)
if is_tf_available()
else ()
)
_UpperCAmelCase :List[Any] = (
{
'feature-extraction': TFEsmModel,
'fill-mask': TFEsmForMaskedLM,
'text-classification': TFEsmForSequenceClassification,
'token-classification': TFEsmForTokenClassification,
'zero-shot': TFEsmForSequenceClassification,
}
if is_tf_available()
else {}
)
_UpperCAmelCase :Tuple = False
_UpperCAmelCase :Optional[Any] = False
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : Tuple = TFEsmModelTester(self )
UpperCamelCase : List[Any] = ConfigTester(self , config_class=A_ , hidden_size=37 )
def __UpperCamelCase( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A_ )
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : Any = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*A_ )
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*A_ )
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*A_ )
@slow
def __UpperCamelCase( self ):
'''simple docstring'''
for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase : Union[str, Any] = TFEsmModel.from_pretrained(A_ )
self.assertIsNotNone(A_ )
@unittest.skip("Protein models do not support embedding resizing." )
def __UpperCamelCase( self ):
'''simple docstring'''
pass
@unittest.skip("Protein models do not support embedding resizing." )
def __UpperCamelCase( self ):
'''simple docstring'''
pass
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase , UpperCamelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase : Dict = model_class(A_ )
assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer )
if model_class is TFEsmForMaskedLM:
# Output embedding test differs from the main test because they're a matrix, not a layer
UpperCamelCase : Optional[Any] = model.get_bias()
assert isinstance(A_ , A_ )
for k, v in name.items():
assert isinstance(A_ , tf.Variable )
else:
UpperCamelCase : str = model.get_output_embeddings()
assert x is None
UpperCamelCase : Dict = model.get_bias()
assert name is None
@require_tf
class A__ ( unittest.TestCase ):
@slow
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : Dict = TFEsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D" )
UpperCamelCase : Optional[int] = tf.constant([[0, 1, 2, 3, 4, 5]] )
UpperCamelCase : int = model(A_ )[0]
UpperCamelCase : List[str] = [1, 6, 33]
self.assertEqual(list(output.numpy().shape ) , A_ )
# compare the actual values for a slice.
UpperCamelCase : str = tf.constant(
[
[
[8.92_15_18, -10.58_98_14, -6.4_67_13_07],
[-6.3_96_71_56, -13.91_13_77, -1.1_21_19_15],
[-7.78_12_47, -13.95_15_57, -3.74_05_92],
]
] )
self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-2 ) )
@slow
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : Optional[int] = TFEsmModel.from_pretrained("facebook/esm2_t6_8M_UR50D" )
UpperCamelCase : Optional[int] = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
UpperCamelCase : List[Any] = model(A_ )[0]
# compare the actual values for a slice.
UpperCamelCase : int = tf.constant(
[
[
[0.14_44_30_92, 0.54_12_53_27, 0.3_24_77_39],
[0.30_34_04_84, 0.00_52_66_76, 0.31_07_77_22],
[0.32_27_80_43, -0.24_98_70_96, 0.3_41_46_28],
]
] )
self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
| 52 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class A__ ( unittest.TestCase):
@slow
    def test_output_embeds_base_model(self):
        model = TFCamembertModel.from_pretrained("jplu/tf-camembert-base")
        input_ids = tf.convert_to_tensor(
            [[5, 121, 11, 660, 16, 730, 25543, 110, 83, 6]],
            dtype=tf.int32,
        )  # J'aime le camembert !
        output = model(input_ids)["last_hidden_state"]
        expected_shape = tf.TensorShape((1, 10, 768))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]],
            dtype=tf.float32,
        )
        # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
        # camembert.eval()
        # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
| 287 | 0 |
'''simple docstring'''
def fibonacci(n: int) -> int:
    if n == 1 or not isinstance(n, int):
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2, n + 1):
            sequence.append(sequence[i - 1] + sequence[i - 2])
        return sequence[n]


def fibonacci_digits_index(n: int) -> int:
    digits = 0
    index = 2
    while digits < n:
        index += 1
        digits = len(str(fibonacci(index)))
    return index


def solution(n: int = 1000) -> int:
    return fibonacci_digits_index(n)
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
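# This is Project Euler 25: the index of the first Fibonacci number with n
# digits. Recomputing the whole sequence per index, as above, is quadratic; a
# single iterative scan is linear in the answer. A quick sketch:
def first_fib_index_with_digits(n: int) -> int:
    a, b, index = 1, 1, 2
    while len(str(b)) < n:
        a, b = b, a + b
        index += 1
    return index


assert first_fib_index_with_digits(3) == 12  # F(12) = 144
# first_fib_index_with_digits(1000) == 4782, the Project Euler 25 answer.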
| 53 |
import bz2
import gzip
import lzma
import os
import shutil
import struct
import tarfile
import warnings
import zipfile
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict, List, Optional, Type, Union
from .. import config
from .filelock import FileLock
from .logging import get_logger
_lowerCamelCase =get_logger(__name__)
class A__ :
def __init__( self , __magic_name__ = None ):
lowerCamelCase : Dict = (
os.path.join(__magic_name__ , config.EXTRACTED_DATASETS_DIR ) if cache_dir else config.EXTRACTED_DATASETS_PATH
)
lowerCamelCase : List[str] = Extractor
def UpperCamelCase__ ( self , __magic_name__ ):
from .file_utils import hash_url_to_filename
# Path where we extract compressed archives
        # We extract in the cache dir, and get the extracted path name by hashing the original path
lowerCamelCase : int = os.path.abspath(__magic_name__ )
return os.path.join(self.extract_dir , hash_url_to_filename(__magic_name__ ) )
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ ):
return force_extract or (
not os.path.isfile(__magic_name__ ) and not (os.path.isdir(__magic_name__ ) and os.listdir(__magic_name__ ))
)
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ = False ):
lowerCamelCase : Union[str, Any] = self.extractor.infer_extractor_format(__magic_name__ )
if not extractor_format:
return input_path
lowerCamelCase : int = self._get_output_path(__magic_name__ )
if self._do_extract(__magic_name__ , __magic_name__ ):
self.extractor.extract(__magic_name__ , __magic_name__ , __magic_name__ )
return output_path
class A__ ( __SCREAMING_SNAKE_CASE):
@classmethod
@abstractmethod
def UpperCamelCase__ ( cls , __magic_name__ , **__magic_name__ ):
...
@staticmethod
@abstractmethod
def UpperCamelCase__ ( __magic_name__ , __magic_name__ ):
...
class A__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : List[bytes] = []
@staticmethod
def UpperCamelCase__ ( __magic_name__ , __magic_name__ ):
with open(__magic_name__ , """rb""" ) as f:
return f.read(__magic_name__ )
@classmethod
def UpperCamelCase__ ( cls , __magic_name__ , __magic_name__ = b"" ):
if not magic_number:
lowerCamelCase : Optional[Any] = max(len(__magic_name__ ) for cls_magic_number in cls.magic_numbers )
try:
lowerCamelCase : Tuple = cls.read_magic_number(__magic_name__ , __magic_name__ )
except OSError:
return False
return any(magic_number.startswith(__magic_name__ ) for cls_magic_number in cls.magic_numbers )
class A__ ( __SCREAMING_SNAKE_CASE):
@classmethod
def UpperCamelCase__ ( cls , __magic_name__ , **__magic_name__ ):
return tarfile.is_tarfile(__magic_name__ )
@staticmethod
def UpperCamelCase__ ( __magic_name__ , __magic_name__ ):
def resolved(__magic_name__ ) -> str:
return os.path.realpath(os.path.abspath(__magic_name__ ) )
def badpath(__magic_name__ , __magic_name__ ) -> bool:
# joinpath will ignore base if path is absolute
return not resolved(os.path.join(__magic_name__ , __magic_name__ ) ).startswith(__magic_name__ )
def badlink(__magic_name__ , __magic_name__ ) -> bool:
# Links are interpreted relative to the directory containing the link
lowerCamelCase : List[str] = resolved(os.path.join(__magic_name__ , os.path.dirname(info.name ) ) )
return badpath(info.linkname , base=__magic_name__ )
lowerCamelCase : Optional[Any] = resolved(__magic_name__ )
for finfo in members:
if badpath(finfo.name , __magic_name__ ):
logger.error(F'''Extraction of {finfo.name} is blocked (illegal path)''' )
elif finfo.issym() and badlink(__magic_name__ , __magic_name__ ):
logger.error(F'''Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}''' )
elif finfo.islnk() and badlink(__magic_name__ , __magic_name__ ):
logger.error(F'''Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}''' )
else:
yield finfo
@staticmethod
def UpperCamelCase__ ( __magic_name__ , __magic_name__ ):
os.makedirs(__magic_name__ , exist_ok=__magic_name__ )
lowerCamelCase : Dict = tarfile.open(__magic_name__ )
tar_file.extractall(__magic_name__ , members=TarExtractor.safemembers(__magic_name__ , __magic_name__ ) )
tar_file.close()
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : str = [B"""\x1F\x8B"""]
@staticmethod
def UpperCamelCase__ ( __magic_name__ , __magic_name__ ):
with gzip.open(__magic_name__ , """rb""" ) as gzip_file:
with open(__magic_name__ , """wb""" ) as extracted_file:
shutil.copyfileobj(__magic_name__ , __magic_name__ )
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : Optional[int] = [
B"""PK\x03\x04""",
B"""PK\x05\x06""", # empty archive
B"""PK\x07\x08""", # spanned archive
]
@classmethod
def UpperCamelCase__ ( cls , __magic_name__ , __magic_name__ = b"" ):
if super().is_extractable(__magic_name__ , magic_number=__magic_name__ ):
return True
try:
# Alternative version of zipfile.is_zipfile that has less false positives, but misses executable zip archives.
# From: https://github.com/python/cpython/pull/5053
from zipfile import (
_CD_SIGNATURE,
_ECD_DISK_NUMBER,
_ECD_DISK_START,
_ECD_ENTRIES_TOTAL,
_ECD_OFFSET,
_ECD_SIZE,
_EndRecData,
sizeCentralDir,
stringCentralDir,
structCentralDir,
)
with open(__magic_name__ , """rb""" ) as fp:
lowerCamelCase : List[str] = _EndRecData(__magic_name__ )
if endrec:
if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0:
return True # Empty zipfiles are still zipfiles
elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]:
fp.seek(endrec[_ECD_OFFSET] ) # Central directory is on the same disk
if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir:
lowerCamelCase : List[Any] = fp.read(__magic_name__ ) # CD is where we expect it to be
if len(__magic_name__ ) == sizeCentralDir:
lowerCamelCase : str = struct.unpack(__magic_name__ , __magic_name__ ) # CD is the right size
if centdir[_CD_SIGNATURE] == stringCentralDir:
return True # First central directory entry has correct magic number
return False
except Exception: # catch all errors in case future python versions change the zipfile internals
return False
@staticmethod
def UpperCamelCase__ ( __magic_name__ , __magic_name__ ):
os.makedirs(__magic_name__ , exist_ok=__magic_name__ )
with zipfile.ZipFile(__magic_name__ , """r""" ) as zip_file:
zip_file.extractall(__magic_name__ )
zip_file.close()
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : List[str] = [B"""\xFD\x37\x7A\x58\x5A\x00"""]
@staticmethod
def UpperCamelCase__ ( __magic_name__ , __magic_name__ ):
with lzma.open(__magic_name__ ) as compressed_file:
with open(__magic_name__ , """wb""" ) as extracted_file:
shutil.copyfileobj(__magic_name__ , __magic_name__ )
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : Any = [B"""Rar!\x1a\x07\x00""", B"""Rar!\x1a\x07\x01\x00"""] # RAR_ID # RAR5_ID
@staticmethod
def UpperCamelCase__ ( __magic_name__ , __magic_name__ ):
if not config.RARFILE_AVAILABLE:
raise ImportError("""Please pip install rarfile""" )
import rarfile
os.makedirs(__magic_name__ , exist_ok=__magic_name__ )
lowerCamelCase : Union[str, Any] = rarfile.RarFile(__magic_name__ )
rf.extractall(__magic_name__ )
rf.close()
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : Tuple = [B"""\x28\xb5\x2F\xFD"""]
@staticmethod
def UpperCamelCase__ ( __magic_name__ , __magic_name__ ):
if not config.ZSTANDARD_AVAILABLE:
raise ImportError("""Please pip install zstandard""" )
import zstandard as zstd
lowerCamelCase : int = zstd.ZstdDecompressor()
with open(__magic_name__ , """rb""" ) as ifh, open(__magic_name__ , """wb""" ) as ofh:
dctx.copy_stream(__magic_name__ , __magic_name__ )
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : Any = [B"""\x42\x5A\x68"""]
@staticmethod
def UpperCamelCase__ ( __magic_name__ , __magic_name__ ):
        with bz2.open(__magic_name__ , """rb""" ) as compressed_file:
with open(__magic_name__ , """wb""" ) as extracted_file:
shutil.copyfileobj(__magic_name__ , __magic_name__ )
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : List[Any] = [B"""\x37\x7A\xBC\xAF\x27\x1C"""]
@staticmethod
def UpperCamelCase__ ( __magic_name__ , __magic_name__ ):
if not config.PY7ZR_AVAILABLE:
raise ImportError("""Please pip install py7zr""" )
        import py7zr
os.makedirs(__magic_name__ , exist_ok=__magic_name__ )
        with py7zr.SevenZipFile(__magic_name__ , """r""" ) as archive:
archive.extractall(__magic_name__ )
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : List[Any] = [B"""\x04\x22\x4D\x18"""]
@staticmethod
def UpperCamelCase__ ( __magic_name__ , __magic_name__ ):
if not config.LZ4_AVAILABLE:
raise ImportError("""Please pip install lz4""" )
        import lz4.frame

        with lz4.frame.open(__magic_name__ , """rb""" ) as compressed_file:
with open(__magic_name__ , """wb""" ) as extracted_file:
shutil.copyfileobj(__magic_name__ , __magic_name__ )
class A__ :
    # Keep the zip extractor last: other archive formats (e.g. tar or gzip) can be wrongly detected as zip.
_UpperCAmelCase : Dict[str, Type[BaseExtractor]] = {
"tar": TarExtractor,
"gzip": GzipExtractor,
"zip": ZipExtractor,
"xz": XzExtractor,
"rar": RarExtractor,
"zstd": ZstdExtractor,
"bz2": BzipaExtractor,
"7z": SevenZipExtractor, # <Added version="2.4.0"/>
"lz4": LzaExtractor, # <Added version="2.4.0"/>
}
@classmethod
def UpperCamelCase__ ( cls ):
return max(
len(__magic_name__ )
for extractor in cls.extractors.values()
if issubclass(__magic_name__ , __magic_name__ )
for extractor_magic_number in extractor.magic_numbers )
@staticmethod
def UpperCamelCase__ ( __magic_name__ , __magic_name__ ):
try:
return MagicNumberBaseExtractor.read_magic_number(__magic_name__ , magic_number_length=__magic_name__ )
except OSError:
return b""
@classmethod
def UpperCamelCase__ ( cls , __magic_name__ , __magic_name__ = False ):
warnings.warn(
"""Method 'is_extractable' was deprecated in version 2.4.0 and will be removed in 3.0.0. """
"""Use 'infer_extractor_format' instead.""" , category=__magic_name__ , )
lowerCamelCase : int = cls.infer_extractor_format(__magic_name__ )
if extractor_format:
return True if not return_extractor else (True, cls.extractors[extractor_format])
return False if not return_extractor else (False, None)
@classmethod
def UpperCamelCase__ ( cls , __magic_name__ ): # <Added version="2.4.0"/>
lowerCamelCase : Dict = cls._get_magic_number_max_length()
lowerCamelCase : Optional[Any] = cls._read_magic_number(__magic_name__ , __magic_name__ )
for extractor_format, extractor in cls.extractors.items():
if extractor.is_extractable(__magic_name__ , magic_number=__magic_name__ ):
return extractor_format
@classmethod
def UpperCamelCase__ ( cls , __magic_name__ , __magic_name__ , __magic_name__ = None , __magic_name__ = "deprecated" , ):
os.makedirs(os.path.dirname(__magic_name__ ) , exist_ok=__magic_name__ )
# Prevent parallel extractions
lowerCamelCase : Tuple = str(Path(__magic_name__ ).with_suffix(""".lock""" ) )
with FileLock(__magic_name__ ):
shutil.rmtree(__magic_name__ , ignore_errors=__magic_name__ )
if extractor_format or extractor != "deprecated":
if extractor != "deprecated" or not isinstance(__magic_name__ , __magic_name__ ): # passed as positional arg
warnings.warn(
"""Parameter 'extractor' was deprecated in version 2.4.0 and will be removed in 3.0.0. """
"""Use 'extractor_format' instead.""" , category=__magic_name__ , )
lowerCamelCase : int = extractor if extractor != """deprecated""" else extractor_format
else:
lowerCamelCase : Optional[int] = cls.extractors[extractor_format]
return extractor.extract(__magic_name__ , __magic_name__ )
else:
warnings.warn(
"""Parameter 'extractor_format' was made required in version 2.4.0 and not passing it will raise an """
"""exception in 3.0.0.""" , category=__magic_name__ , )
for extractor in cls.extractors.values():
if extractor.is_extractable(__magic_name__ ):
return extractor.extract(__magic_name__ , __magic_name__ )
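# All the extractors above identify formats by magic numbers: read the first
# few bytes of a file and compare against known prefixes. A minimal standalone
# sniffer in the same spirit (the prefix table mirrors the constants above;
# the function name is illustrative):
from typing import Optional

MAGIC_NUMBERS = {
    "gzip": [b"\x1f\x8b"],
    "zip": [b"PK\x03\x04", b"PK\x05\x06", b"PK\x07\x08"],
    "xz": [b"\xfd7zXZ\x00"],
    "bz2": [b"BZh"],
    "zstd": [b"\x28\xb5\x2f\xfd"],
}


def sniff_format(path: str) -> Optional[str]:
    # Read just enough bytes to cover the longest known signature.
    max_len = max(len(m) for magics in MAGIC_NUMBERS.values() for m in magics)
    with open(path, "rb") as f:
        head = f.read(max_len)
    for fmt, magics in MAGIC_NUMBERS.items():
        if any(head.startswith(m) for m in magics):
            return fmt
    return None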
| 287 | 0 |
"""simple docstring"""
import unittest
from transformers import DebertaVaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaVaForMaskedLM,
DebertaVaForMultipleChoice,
DebertaVaForQuestionAnswering,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaModel,
)
from transformers.models.deberta_va.modeling_deberta_va import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST
class UpperCamelCase_ ( UpperCamelCase):
"""simple docstring"""
def __init__( self : Any , UpperCAmelCase__ : int , UpperCAmelCase__ : str=1_3 , UpperCAmelCase__ : Tuple=7 , UpperCAmelCase__ : Optional[int]=True , UpperCAmelCase__ : Optional[int]=True , UpperCAmelCase__ : Union[str, Any]=True , UpperCAmelCase__ : Optional[Any]=True , UpperCAmelCase__ : Union[str, Any]=9_9 , UpperCAmelCase__ : Union[str, Any]=3_2 , UpperCAmelCase__ : List[Any]=5 , UpperCAmelCase__ : Optional[Any]=4 , UpperCAmelCase__ : Any=3_7 , UpperCAmelCase__ : Tuple="gelu" , UpperCAmelCase__ : Any=0.1 , UpperCAmelCase__ : Tuple=0.1 , UpperCAmelCase__ : Optional[Any]=5_1_2 , UpperCAmelCase__ : Dict=1_6 , UpperCAmelCase__ : str=2 , UpperCAmelCase__ : List[Any]=0.02 , UpperCAmelCase__ : Union[str, Any]=False , UpperCAmelCase__ : Optional[Any]=True , UpperCAmelCase__ : Union[str, Any]="None" , UpperCAmelCase__ : List[str]=3 , UpperCAmelCase__ : str=4 , UpperCAmelCase__ : Any=None , ) -> Optional[int]:
__SCREAMING_SNAKE_CASE = parent
__SCREAMING_SNAKE_CASE = batch_size
__SCREAMING_SNAKE_CASE = seq_length
__SCREAMING_SNAKE_CASE = is_training
__SCREAMING_SNAKE_CASE = use_input_mask
__SCREAMING_SNAKE_CASE = use_token_type_ids
__SCREAMING_SNAKE_CASE = use_labels
__SCREAMING_SNAKE_CASE = vocab_size
__SCREAMING_SNAKE_CASE = hidden_size
__SCREAMING_SNAKE_CASE = num_hidden_layers
__SCREAMING_SNAKE_CASE = num_attention_heads
__SCREAMING_SNAKE_CASE = intermediate_size
__SCREAMING_SNAKE_CASE = hidden_act
__SCREAMING_SNAKE_CASE = hidden_dropout_prob
__SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
__SCREAMING_SNAKE_CASE = max_position_embeddings
__SCREAMING_SNAKE_CASE = type_vocab_size
__SCREAMING_SNAKE_CASE = type_sequence_label_size
__SCREAMING_SNAKE_CASE = initializer_range
__SCREAMING_SNAKE_CASE = num_labels
__SCREAMING_SNAKE_CASE = num_choices
__SCREAMING_SNAKE_CASE = relative_attention
__SCREAMING_SNAKE_CASE = position_biased_input
__SCREAMING_SNAKE_CASE = pos_att_type
__SCREAMING_SNAKE_CASE = scope
def UpperCAmelCase_ ( self : List[str] ) -> List[str]:
__SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__SCREAMING_SNAKE_CASE = None
if self.use_input_mask:
__SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
__SCREAMING_SNAKE_CASE = None
if self.use_token_type_ids:
__SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = None
if self.use_labels:
__SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.num_choices )
__SCREAMING_SNAKE_CASE = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCAmelCase_ ( self : List[str] ) -> Optional[int]:
return DebertaVaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
    def check_loss_output(self, result):
        self.parent.assertListEqual(list(result.loss.size()), [])

    def create_and_check_deberta_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaV2Model(config=config)
        model.to(torch_device)
        model.eval()
        sequence_output = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids)[0]

        self.parent.assertListEqual(list(sequence_output.size()), [self.batch_size, self.seq_length, self.hidden_size])

    def create_and_check_deberta_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaV2ForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_deberta_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DebertaV2ForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertListEqual(list(result.logits.size()), [self.batch_size, self.num_labels])
        self.check_loss_output(result)

    def create_and_check_deberta_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DebertaV2ForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_deberta_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaV2ForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_deberta_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaV2ForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class DebertaV2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            DebertaV2Model,
            DebertaV2ForMaskedLM,
            DebertaV2ForSequenceClassification,
            DebertaV2ForTokenClassification,
            DebertaV2ForQuestionAnswering,
            DebertaV2ForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DebertaV2Model,
            "fill-mask": DebertaV2ForMaskedLM,
            "question-answering": DebertaV2ForQuestionAnswering,
            "text-classification": DebertaV2ForSequenceClassification,
            "token-classification": DebertaV2ForTokenClassification,
            "zero-shot": DebertaV2ForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    fx_compatible = True
    test_torchscript = False
    test_pruning = False
    test_head_masking = False
    is_encoder_decoder = False
    def setUp(self):
        self.model_tester = DebertaVaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaV2Config, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_deberta_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_model(*config_and_inputs)

    def test_deberta_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_sequence_classification(*config_and_inputs)

    def test_deberta_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_masked_lm(*config_and_inputs)

    def test_deberta_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_question_answering(*config_and_inputs)

    def test_deberta_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_token_classification(*config_and_inputs)

    def test_deberta_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DebertaV2Model.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
@require_sentencepiece
@require_tokenizers
class DebertaV2ModelIntegrationTest(unittest.TestCase):
    @unittest.skip(reason="Model not available yet")
    def test_inference_masked_lm(self):
        pass

    @slow
    def test_inference_no_head(self):
        model = DebertaV2Model.from_pretrained("microsoft/deberta-v2-xlarge")

        input_ids = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        # compare the actual values for a slice.
        expected_slice = torch.tensor(
            [[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4), f"{output[:, 1:4, 1:4]}")
| 54 |
import json
import os
import tempfile
import transformers
import datasets
from utils import generate_example_dataset, get_duration
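# Times `datasets.Dataset.map`/`.filter` under several output formats (python, numpy,
# pandas, torch, tensorflow) and writes the timings to a JSON file next to this script.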
SPEED_TEST_N_EXAMPLES = 500_000

RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))
@get_duration
def map(dataset, **kwargs):
    # NOTE: intentionally shadows the builtin `map` within this benchmark module.
    _ = dataset.map(**kwargs)


@get_duration
def filter(dataset, **kwargs):
    # NOTE: intentionally shadows the builtin `filter` within this benchmark module.
    _ = dataset.filter(**kwargs)
def benchmark_map_filter():
    times = {"num examples": SPEED_TEST_N_EXAMPLES}
    with tempfile.TemporaryDirectory() as tmp_dir:
        features = datasets.Features({"text": datasets.Value("string"), "numbers": datasets.Value("float32")})
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, "dataset.arrow"), features, num_examples=SPEED_TEST_N_EXAMPLES
        )

        tokenizer = transformers.AutoTokenizer.from_pretrained("bert-base-cased", use_fast=True)

        def tokenize(examples):
            return tokenizer(examples["text"])

        times["map identity"] = map(dataset)
        times["map identity batched"] = map(dataset, batched=True)
        times["map no-op batched"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="numpy"):
            times["map no-op batched numpy"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="pandas"):
            times["map no-op batched pandas"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="torch", columns="numbers"):
            times["map no-op batched pytorch"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="tensorflow", columns="numbers"):
            times["map no-op batched tensorflow"] = map(dataset, function=lambda x: None, batched=True)

        times["map fast-tokenizer batched"] = map(dataset, function=tokenize, batched=True)

        times["filter"] = filter(dataset)

        # Activate later when tokenizer support batched inputs
        # with dataset.formatted_as(type='numpy'):
        #     times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)

    with open(RESULTS_FILE_PATH, "wb") as f:
        f.write(json.dumps(times).encode("utf-8"))
if __name__ == "__main__": # useful to run the profiler
benchmark_map_filter()
| 287 | 0 |
import tempfile
import unittest
from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch
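# `create_student_by_copying_alternating_layers` appears to return a tuple whose first
# element is the student model (hence the `student, *_ = ...` unpacking below); `e=` and
# `d=` set the number of student encoder/decoder layers, with None meaning "copy the teacher's".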
TINY_BART = "sshleifer/bart-tiny-random"
TINY_T5 = "patrickvonplaten/t5-tiny-random"
@require_torch
class MakeStudentTester(unittest.TestCase):
    @cached_property
    def teacher_config(self):
        return AutoConfig.from_pretrained(TINY_BART)

    def test_valid_t5(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_T5, tempfile.mkdtemp(), e=1, d=1)
        self.assertEqual(student.config.num_hidden_layers, 1)

    def test_asymmetric_t5(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_T5, tempfile.mkdtemp(), e=1, d=None)

    def test_same_decoder_small_encoder(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=1, d=None)
        self.assertEqual(student.config.encoder_layers, 1)
        self.assertEqual(student.config.decoder_layers, self.teacher_config.encoder_layers)

    def test_small_enc_small_dec(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=1, d=1)
        self.assertEqual(student.config.encoder_layers, 1)
        self.assertEqual(student.config.decoder_layers, 1)

    def test_raises_assert(self):
        with self.assertRaises(AssertionError):
            create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=None, d=None)
| 55 |
def lucas_lehmer_test(p: int) -> bool:
    # Lucas-Lehmer primality test: the Mersenne number 2**p - 1 is prime iff
    # s_(p-2) == 0 in the sequence s_0 = 4, s_k = s_(k-1)**2 - 2 (mod 2**p - 1).
    if p < 2:
        raise ValueError("p should not be less than 2!")
    elif p == 2:
        return True

    s = 4
    m = (1 << p) - 1
    for _ in range(p - 2):
        s = ((s * s) - 2) % m
    return s == 0
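# Example: 2**7 - 1 = 127 is prime, so lucas_lehmer_test(7) is True,
# while 2**11 - 1 = 2047 = 23 * 89, so lucas_lehmer_test(11) is False.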
if __name__ == "__main__":
print(lucas_lehmer_test(7))
print(lucas_lehmer_test(1_1))
| 287 | 0 |
# flake8: noqa
# Lint as: python3
from typing import Dict, List, Optional, Type
from .. import config
from ..utils import logging
from .formatting import (
ArrowFormatter,
CustomFormatter,
Formatter,
PandasFormatter,
PythonFormatter,
TensorFormatter,
format_table,
query_table,
)
from .np_formatter import NumpyFormatter
logger = logging.get_logger(__name__)

_FORMAT_TYPES: Dict[Optional[str], Type[Formatter]] = {}
_FORMAT_TYPES_ALIASES: Dict[Optional[str], str] = {}
_FORMAT_TYPES_ALIASES_UNAVAILABLE: Dict[Optional[str], Exception] = {}
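# `_register_formatter` / `_register_unavailable_formatter` fill the tables above;
# `get_formatter` looks a format type up and raises the stored error when the backend is missing.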
def _register_formatter(
    formatter_cls: type,
    format_type: Optional[str],
    aliases: Optional[List[str]] = None,
):
    aliases = aliases if aliases is not None else []
    if format_type in _FORMAT_TYPES:
        logger.warning(
            f"Overwriting format type '{format_type}' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})"
        )
    _FORMAT_TYPES[format_type] = formatter_cls
    for alias in set(aliases + [format_type]):
        if alias in _FORMAT_TYPES_ALIASES:
            logger.warning(
                f"Overwriting format type alias '{alias}' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})"
            )
        _FORMAT_TYPES_ALIASES[alias] = format_type
def _register_unavailable_formatter(
    unavailable_error: Exception, format_type: Optional[str], aliases: Optional[List[str]] = None
):
    aliases = aliases if aliases is not None else []
    for alias in set(aliases + [format_type]):
        _FORMAT_TYPES_ALIASES_UNAVAILABLE[alias] = unavailable_error
# Here we define all the available formatting functions that can be used by `Dataset.set_format`
_register_formatter(PythonFormatter, None, aliases=['python'])
_register_formatter(ArrowFormatter, 'arrow', aliases=['pa', 'pyarrow'])
_register_formatter(NumpyFormatter, 'numpy', aliases=['np'])
_register_formatter(PandasFormatter, 'pandas', aliases=['pd'])
_register_formatter(CustomFormatter, 'custom')
if config.TORCH_AVAILABLE:
from .torch_formatter import TorchFormatter
_register_formatter(TorchFormatter, 'torch', aliases=['pt', 'pytorch'])
else:
    _torch_error = ValueError("PyTorch needs to be installed to be able to return PyTorch tensors.")
_register_unavailable_formatter(_torch_error, 'torch', aliases=['pt', 'pytorch'])
if config.TF_AVAILABLE:
from .tf_formatter import TFFormatter
_register_formatter(TFFormatter, 'tensorflow', aliases=['tf'])
else:
    _tf_error = ValueError("Tensorflow needs to be installed to be able to return Tensorflow tensors.")
_register_unavailable_formatter(_tf_error, 'tensorflow', aliases=['tf'])
if config.JAX_AVAILABLE:
from .jax_formatter import JaxFormatter
_register_formatter(JaxFormatter, 'jax', aliases=[])
else:
    _jax_error = ValueError("JAX needs to be installed to be able to return JAX arrays.")
_register_unavailable_formatter(_jax_error, 'jax', aliases=[])
def get_format_type_from_alias(format_type: Optional[str]) -> Optional[str]:
    # If the given format type is a known alias, return its main type name; otherwise return it unchanged.
    if format_type in _FORMAT_TYPES_ALIASES:
        return _FORMAT_TYPES_ALIASES[format_type]
    else:
        return format_type
def get_formatter(format_type: Optional[str], **format_kwargs) -> Formatter:
    format_type = get_format_type_from_alias(format_type)
    if format_type in _FORMAT_TYPES:
        return _FORMAT_TYPES[format_type](**format_kwargs)
    if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE:
        raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type]
    else:
        raise ValueError(
            f"Return type should be None or selected in {list(type for type in _FORMAT_TYPES.keys() if type != None)}, but got '{format_type}'"
        )
| 56 |
import warnings
from ...utils import logging
from .image_processing_owlvit import OwlViTImageProcessor
logger = logging.get_logger(__name__)
class OwlViTFeatureExtractor(OwlViTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use OwlViTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 287 | 0 |
"""simple docstring"""
import argparse
import os
import sys
from unittest.mock import patch
import pytorch_lightning as pl
import timeout_decorator
import torch
from distillation import SummarizationDistiller, distill_main
from finetune import SummarizationModule, main
from transformers import MarianMTModel
from transformers.file_utils import cached_path
from transformers.testing_utils import TestCasePlus, require_torch_gpu, slow
from utils import load_json
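# Runs the real bash training scripts end-to-end (with sys.argv patched in-process) and
# sanity-checks the logged metrics plus the saved PyTorch Lightning checkpoint.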
MARIAN_MODEL = "sshleifer/mar_enro_6_3_student"
class TestMbartCc25Enro(TestCasePlus):
    def setUp(self):
        super().setUp()

        data_cached = cached_path(
            "https://cdn-datasets.huggingface.co/translation/wmt_en_ro-tr40k-va0.5k-te0.5k.tar.gz",
            extract_compressed_file=True,
        )
        self.data_dir = f"{data_cached}/wmt_en_ro-tr40k-va0.5k-te0.5k"
@slow
@require_torch_gpu
    def test_model_download(self):
        # Warm up the cache so the training test below doesn't include download time.
        MarianMTModel.from_pretrained(MARIAN_MODEL)
@slow
@require_torch_gpu
    def test_train_mbart_cc25_enro_script(self):
        env_vars_to_replace = {
            "$MAX_LEN": 64,
            "$BS": 64,
            "$GAS": 1,
            "$ENRO_DIR": self.data_dir,
            "facebook/mbart-large-cc25": MARIAN_MODEL,
            # "val_check_interval=0.25": "val_check_interval=1.0",
            "--learning_rate=3e-5": "--learning_rate 3e-4",
            "--num_train_epochs 6": "--num_train_epochs 1",
        }

        # Clean up bash script
        bash_script = (self.test_file_dir / "train_mbart_cc25_enro.sh").open().read().split("finetune.py")[1].strip()
        bash_script = bash_script.replace("\\\n", "").strip().replace('"$@"', "")
        for k, v in env_vars_to_replace.items():
            bash_script = bash_script.replace(k, str(v))
        output_dir = self.get_auto_remove_tmp_dir()

        # bash_script = bash_script.replace("--fp16 ", "")
        args = f"""
            --output_dir {output_dir}
            --tokenizer_name Helsinki-NLP/opus-mt-en-ro
            --sortish_sampler
            --do_predict
            --gpus 1
            --freeze_encoder
            --n_train 40000
            --n_val 500
            --n_test 500
            --fp16_opt_level O1
            --num_sanity_val_steps 0
            --eval_beams 2
        """.split()
        # XXX: args.gpus > 1 : handle multi_gpu in the future
        testargs = ["finetune.py"] + bash_script.split() + args
        with patch.object(sys, "argv", testargs):
            parser = argparse.ArgumentParser()
            parser = pl.Trainer.add_argparse_args(parser)
            parser = SummarizationModule.add_model_specific_args(parser, os.getcwd())
            args = parser.parse_args()
            model = main(args)
        # Check metrics
        metrics = load_json(model.metrics_save_path)
        first_step_stats = metrics["val"][0]
        last_step_stats = metrics["val"][-1]
        self.assertEqual(len(metrics["val"]), (args.max_epochs / args.val_check_interval))
        assert isinstance(last_step_stats[f"val_avg_{model.val_metric}"], float)

        self.assertGreater(last_step_stats["val_avg_gen_time"], 0.01)
        # model hanging on generate. Maybe bad config was saved. (XXX: old comment/assert?)
        self.assertLessEqual(last_step_stats["val_avg_gen_time"], 1.0)

        # test learning requirements:
        # 1. BLEU improves over the course of training by more than 2 pts
        self.assertGreater(last_step_stats["val_avg_bleu"] - first_step_stats["val_avg_bleu"], 2)

        # 2. BLEU finishes above 17
        self.assertGreater(last_step_stats["val_avg_bleu"], 17)

        # 3. test BLEU and val BLEU within ~1.1 pt.
        self.assertLess(abs(metrics["val"][-1]["val_avg_bleu"] - metrics["test"][-1]["test_avg_bleu"]), 1.1)

        # check lightning ckpt can be loaded and has a reasonable statedict
        contents = os.listdir(output_dir)
        ckpt_path = [x for x in contents if x.endswith(".ckpt")][0]
        full_path = os.path.join(args.output_dir, ckpt_path)
        ckpt = torch.load(full_path, map_location="cpu")
        expected_key = "model.model.decoder.layers.0.encoder_attn_layer_norm.weight"
        assert expected_key in ckpt["state_dict"]
        assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.float32

        # TODO: turn on args.do_predict when PL bug fixed.
        if args.do_predict:
            contents = {os.path.basename(p) for p in contents}
            assert "test_generations.txt" in contents
            assert "test_results.txt" in contents
            # assert len(metrics["val"]) == desired_n_evals
            assert len(metrics["test"]) == 1
class TestDistilMarianNoTeacher(TestCasePlus):
'''simple docstring'''
@timeout_decorator.timeout(6_00 )
@slow
@require_torch_gpu
    def test_opus_mt_distill_script(self):
        data_dir = f"{self.test_file_dir_str}/test_data/wmt_en_ro"
        env_vars_to_replace = {
            "--fp16_opt_level=O1": "",
            "$MAX_LEN": 128,
            "$BS": 16,
            "$GAS": 1,
            "$ENRO_DIR": data_dir,
            "$m": "sshleifer/student_marian_en_ro_6_1",
            "val_check_interval=0.25": "val_check_interval=1.0",
        }

        # Clean up bash script
        bash_script = (
            (self.test_file_dir / "distil_marian_no_teacher.sh").open().read().split("distillation.py")[1].strip()
        )
        bash_script = bash_script.replace("\\\n", "").strip().replace('"$@"', "")
        bash_script = bash_script.replace("--fp16 ", " ")

        for k, v in env_vars_to_replace.items():
            bash_script = bash_script.replace(k, str(v))
        output_dir = self.get_auto_remove_tmp_dir()
        bash_script = bash_script.replace("--fp16", "")
        epochs = 6
        testargs = (
            ["distillation.py"]
            + bash_script.split()
            + [
                f"--output_dir={output_dir}",
                "--gpus=1",
                "--learning_rate=1e-3",
                f"--num_train_epochs={epochs}",
                "--warmup_steps=10",
                "--val_check_interval=1.0",
                "--do_predict",
            ]
        )
        with patch.object(sys, "argv", testargs):
            parser = argparse.ArgumentParser()
            parser = pl.Trainer.add_argparse_args(parser)
            parser = SummarizationDistiller.add_model_specific_args(parser, os.getcwd())
            args = parser.parse_args()
            # assert args.gpus == gpus THIS BREAKS for multi_gpu

            model = distill_main(args)
# Check metrics
        metrics = load_json(model.metrics_save_path)
        first_step_stats = metrics["val"][0]
        last_step_stats = metrics["val"][-1]
        assert len(metrics["val"]) >= (args.max_epochs / args.val_check_interval)  # +1 accounts for val_sanity_check

        assert last_step_stats["val_avg_gen_time"] >= 0.01

        assert first_step_stats["val_avg_bleu"] < last_step_stats["val_avg_bleu"]  # model learned nothing
        assert 1.0 >= last_step_stats["val_avg_gen_time"]  # model hanging on generate. Maybe bad config was saved.
        assert isinstance(last_step_stats[f"val_avg_{model.val_metric}"], float)

        # check lightning ckpt can be loaded and has a reasonable statedict
        contents = os.listdir(output_dir)
        ckpt_path = [x for x in contents if x.endswith(".ckpt")][0]
        full_path = os.path.join(args.output_dir, ckpt_path)
        ckpt = torch.load(full_path, map_location="cpu")
        expected_key = "model.model.decoder.layers.0.encoder_attn_layer_norm.weight"
        assert expected_key in ckpt["state_dict"]
        assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.float32

        # TODO: turn on args.do_predict when PL bug fixed.
        if args.do_predict:
            contents = {os.path.basename(p) for p in contents}
            assert "test_generations.txt" in contents
            assert "test_results.txt" in contents
            # assert len(metrics["val"]) == desired_n_evals
            assert len(metrics["test"]) == 1
| 57 |
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SEGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "nvidia/segformer-b0-finetuned-ade-512-512": (
        "https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json"
    ),
    # See all SegFormer models at https://huggingface.co/models?filter=segformer
}
class SegformerConfig(PretrainedConfig):
    model_type = "segformer"

    def __init__(
        self,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[32, 64, 160, 256],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        num_attention_heads=[1, 2, 5, 8],
        mlp_ratios=[4, 4, 4, 4],
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        drop_path_rate=0.1,
        layer_norm_eps=1e-6,
        decoder_hidden_size=256,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
            warnings.warn(
                "Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be"
                " removed, as the behaviour will default to that of reshape_last_stage = True.",
                FutureWarning,
            )

        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.reshape_last_stage = kwargs.get("reshape_last_stage", True)
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class SegformerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        return 12
| 287 | 0 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
    from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
@require_torch
@require_sentencepiece
@require_tokenizers
class MT5IntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        model = AutoModelForSeq2SeqLM.from_pretrained("google/mt5-small", return_dict=True).to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="pt").input_ids
        labels = tokenizer("Hi I am", return_tensors="pt").input_ids

        loss = model(input_ids.to(torch_device), labels=labels.to(torch_device)).loss
        # loss is the mean NLL per label token; multiplying by the label length and
        # negating gives the total log-likelihood of the target sequence.
        mtf_score = -(labels.shape[-1] * loss.item())

        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
| 58 |
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging
logger = logging.get_logger(__name__)

GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "EleutherAI/gpt-neo-1.3B": "https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json",
    # See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}
class GPTNeoConfig(PretrainedConfig):
    model_type = "gpt_neo"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}

    def __init__(
        self,
        vocab_size=50257,
        max_position_embeddings=2048,
        hidden_size=2048,
        num_layers=24,
        attention_types=[[["global", "local"], 12]],
        num_heads=16,
        intermediate_size=None,
        window_size=256,
        activation_function="gelu_new",
        resid_dropout=0.0,
        embed_dropout=0.0,
        attention_dropout=0.0,
        classifier_dropout=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.intermediate_size = intermediate_size
        self.window_size = window_size
        self.activation_function = activation_function
        self.resid_dropout = resid_dropout
        self.embed_dropout = embed_dropout
        self.attention_dropout = attention_dropout
        self.classifier_dropout = classifier_dropout
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        self.attention_types = attention_types
        self.attention_layers = self.expand_attention_types_params(attention_types)

        if len(self.attention_layers) != self.num_layers:
            raise ValueError(
                "Configuration for convolutional module is incorrect. "
                "It is required that `len(config.attention_layers)` == `config.num_layers` "
                f"but is `len(config.attention_layers) = {len(self.attention_layers)}`, "
                f"`config.num_layers = {self.num_layers}`. "
                "`config.attention_layers` is prepared using `config.attention_types`. "
                "Please verify the value of `config.attention_types` argument."
            )

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
    @staticmethod
    def expand_attention_types_params(attention_types):
        attentions = []
        for item in attention_types:
            for _ in range(item[1]):
                attentions.extend(item[0])
        return attentions
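    # e.g. attention_types=[[["global", "local"], 12]] expands to the 24-entry list
    # ["global", "local", "global", "local", ...], matching the default num_layers=24.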
def custom_unfold(input, dimension, size, step):
    """Custom torch.Tensor.unfold implementation to enable the export to ONNX."""
    import torch

    shape = input.size()
    rank = len(shape)
    sizedim = shape[dimension]

    low_indices = torch.arange(0, sizedim, step)
    min_length = torch.div(sizedim - size, step, rounding_mode="floor") + 1
    indices = torch.arange(size) + low_indices[:min_length][:, None]

    s = [slice(None)] * rank
    s[dimension] = indices
    sliced = input[s]

    perm = list(range(0, rank + 1))
    perm.append(perm.pop(dimension + 1))

    return sliced.permute(perm)
def custom_get_block_length_and_num_blocks(seq_length, window_size):
    """Custom implementation for GPTNeoAttentionMixin._get_block_length_and_num_blocks to enable the export to ONNX."""
    import torch

    candidates = torch.arange(1, window_size)
    remainders = torch.remainder(seq_length, candidates)
    divisor_indices = remainders == 0
    divisors = candidates[divisor_indices]
    largest_divisor = torch.max(divisors)
    return largest_divisor, torch.div(seq_length, largest_divisor, rounding_mode="floor")
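# i.e. the block length is the largest divisor of seq_length smaller than window_size,
# and the second return value is the resulting number of blocks.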
class GPTNeoOnnxConfig(OnnxConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}

        return common_inputs

    @property
    def num_attention_heads(self) -> int:
        return self._config.num_heads

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
| 287 | 0 |
import argparse
import numpy as np
import torch
from transformers import SpeechT5HifiGan, SpeechT5HifiGanConfig, logging

logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.speecht5")
def load_weights(checkpoint, hf_model, config):
    hf_model.apply_weight_norm()

    hf_model.conv_pre.weight_g.data = checkpoint["input_conv.weight_g"]
    hf_model.conv_pre.weight_v.data = checkpoint["input_conv.weight_v"]
    hf_model.conv_pre.bias.data = checkpoint["input_conv.bias"]

    for i in range(len(config.upsample_rates)):
        hf_model.upsampler[i].weight_g.data = checkpoint[f"upsamples.{i}.1.weight_g"]
        hf_model.upsampler[i].weight_v.data = checkpoint[f"upsamples.{i}.1.weight_v"]
        hf_model.upsampler[i].bias.data = checkpoint[f"upsamples.{i}.1.bias"]

    for i in range(len(config.upsample_rates) * len(config.resblock_kernel_sizes)):
        for j in range(len(config.resblock_dilation_sizes)):
            hf_model.resblocks[i].convs1[j].weight_g.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_g"]
            hf_model.resblocks[i].convs1[j].weight_v.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_v"]
            hf_model.resblocks[i].convs1[j].bias.data = checkpoint[f"blocks.{i}.convs1.{j}.1.bias"]

            hf_model.resblocks[i].convs2[j].weight_g.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_g"]
            hf_model.resblocks[i].convs2[j].weight_v.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_v"]
            hf_model.resblocks[i].convs2[j].bias.data = checkpoint[f"blocks.{i}.convs2.{j}.1.bias"]

    hf_model.conv_post.weight_g.data = checkpoint["output_conv.1.weight_g"]
    hf_model.conv_post.weight_v.data = checkpoint["output_conv.1.weight_v"]
    hf_model.conv_post.bias.data = checkpoint["output_conv.1.bias"]

    hf_model.remove_weight_norm()
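# The weight_g / weight_v parameters only exist while weight norm is active, hence the
# apply_weight_norm() / remove_weight_norm() bracket around the copies above.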
@torch.no_grad()
def convert_hifigan_checkpoint(
    checkpoint_path,
    stats_path,
    pytorch_dump_folder_path,
    config_path=None,
    repo_id=None,
):
    if config_path is not None:
        config = SpeechT5HifiGanConfig.from_pretrained(config_path)
    else:
        config = SpeechT5HifiGanConfig()

    model = SpeechT5HifiGan(config)

    orig_checkpoint = torch.load(checkpoint_path)
    load_weights(orig_checkpoint["model"]["generator"], model, config)

    stats = np.load(stats_path)
    mean = stats[0].reshape(-1)
    scale = stats[1].reshape(-1)
    model.mean = torch.from_numpy(mean).float()
    model.scale = torch.from_numpy(scale).float()

    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print("Pushing to the hub...")
        model.push_to_hub(repo_id)
if __name__ == "__main__":
__lowerCamelCase = argparse.ArgumentParser()
parser.add_argument("""--checkpoint_path""", required=True, default=None, type=str, help="""Path to original checkpoint""")
parser.add_argument("""--stats_path""", required=True, default=None, type=str, help="""Path to stats.npy file""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, default=None, type=str, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
__lowerCamelCase = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
| 59 |
from . import __version__
# Backward compatibility imports, to make sure all those objects can be found in file_utils
from .utils import (
CLOUDFRONT_DISTRIB_PREFIX,
CONFIG_NAME,
DISABLE_TELEMETRY,
DUMMY_INPUTS,
DUMMY_MASK,
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
FEATURE_EXTRACTOR_NAME,
FLAX_WEIGHTS_NAME,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
MODEL_CARD_NAME,
MULTIPLE_CHOICE_DUMMY_INPUTS,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
SENTENCEPIECE_UNDERLINE,
SPIECE_UNDERLINE,
TF2_WEIGHTS_NAME,
TF_WEIGHTS_NAME,
TORCH_FX_REQUIRED_VERSION,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
USE_JAX,
USE_TF,
USE_TORCH,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
ContextManagers,
DummyObject,
EntryNotFoundError,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
TensorType,
_LazyModule,
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
cached_property,
copy_func,
default_cache_path,
define_sagemaker_information,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
get_torch_version,
has_file,
http_user_agent,
is_apex_available,
    is_bs4_available,
is_coloredlogs_available,
is_datasets_available,
    is_detectron2_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_librosa_available,
is_offline_mode,
is_onnx_available,
is_pandas_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
    is_py3nvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_tensor,
is_tensorflow_probability_available,
    is_tf2onnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
    is_torch_bf16_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
    is_torch_tf32_available,
is_torch_tpu_available,
is_torchaudio_available,
is_training_run_on_sagemaker,
is_vision_available,
replace_return_docstrings,
requires_backends,
to_numpy,
to_py_obj,
torch_only_method,
)
| 287 | 0 |
"""simple docstring"""
from manim import *
class Stage1(Scene):
    def construct(self):
        mem = Rectangle(height=0.5, width=0.5)
        fill = Rectangle(height=0.46, width=0.46).set_stroke(width=0)

        cpu_left_col_base = [mem.copy() for i in range(6)]
        cpu_right_col_base = [mem.copy() for i in range(6)]
        cpu_left_col = VGroup(*cpu_left_col_base).arrange(UP, buff=0)
        cpu_right_col = VGroup(*cpu_right_col_base).arrange(UP, buff=0)
        cpu_rects = VGroup(cpu_left_col, cpu_right_col).arrange(RIGHT, buff=0)
        cpu_text = Text("CPU", font_size=24)
        cpu = Group(cpu_rects, cpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        cpu.move_to([-2.5, -0.5, 0])
        self.add(cpu)

        gpu_base = [mem.copy() for i in range(1)]
        gpu_rect = VGroup(*gpu_base).arrange(UP, buff=0)
        gpu_text = Text("GPU", font_size=24)
        gpu = Group(gpu_rect, gpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        gpu.align_to(cpu, DOWN)
        gpu.set_x(gpu.get_x() - 1)
        self.add(gpu)

        model_base = [mem.copy() for i in range(6)]
        model_rect = VGroup(*model_base).arrange(RIGHT, buff=0)
        model_text = Text("Model", font_size=24)
        model = Group(model_rect, model_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        model.move_to([3, -1.0, 0])

        self.play(
            Create(cpu_left_col, run_time=1),
            Create(cpu_right_col, run_time=1),
            Create(gpu_rect, run_time=1),
        )

        step_1 = MarkupText(
            f"First, an empty model skeleton is loaded\ninto <span fgcolor='{YELLOW}'>memory</span> without using much RAM.",
            font_size=24,
        )
        key = Square(side_length=2.2)
        key.move_to([-5, 2, 0])
        key_text = MarkupText(
            f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model",
            font_size=18,
        )
        key_text.move_to([-5, 2.4, 0])

        step_1.move_to([2, 2, 0])
        self.play(Write(step_1, run_time=2.5), Write(key_text), Write(key))

        self.add(model)

        cpu_targs = []
        first_animations = []
        second_animations = []
        for i, rect in enumerate(model_base):
            cpu_target = Rectangle(height=0.46, width=0.46).set_stroke(width=0.0).set_fill(YELLOW, opacity=0.7)
            cpu_target.move_to(rect)
            cpu_target.generate_target()
            cpu_target.target.height = 0.46 / 4
            cpu_target.target.width = 0.46 / 3
            if i == 0:
                cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT), buff=0.02, direction=UP)
                cpu_target.target.set_x(cpu_target.target.get_x() + 0.1)
            elif i == 3:
                cpu_target.target.next_to(cpu_targs[0].target, direction=UP, buff=0.0)
            else:
                cpu_target.target.next_to(cpu_targs[i - 1].target, direction=RIGHT, buff=0.0)
            cpu_targs.append(cpu_target)

            first_animations.append(rect.animate(run_time=0.5).set_stroke(YELLOW))
            second_animations.append(MoveToTarget(cpu_target, run_time=1.5))

        self.play(*first_animations)
        self.play(*second_animations)

        self.wait()
| 60 |
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
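# End-to-end checks that gradients only synchronize across processes on the right
# steps when using `Accelerator.no_sync` / `Accelerator.accumulate`.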
def check_model_parameters(model_a, model_b, did_step, iteration):
    for param, grad_param in zip(model_a.parameters(), model_b.parameters()):
        if not param.requires_grad:
            continue
        if not did_step:
            # Grads should not be in sync
            assert (
                torch.allclose(param.grad, grad_param.grad) is False
            ), f"Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})"
        else:
            # Grads should be in sync
            assert (
                torch.allclose(param.grad, grad_param.grad) is True
            ), f"Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})"
def step_model(model, input, target, accelerator, do_backward=True):
    model.train()
    output = model(input)
    loss = F.mse_loss(output, target.to(output.device))
    if not do_backward:
        loss /= accelerator.gradient_accumulation_steps
        loss.backward()
    else:
        accelerator.backward(loss)
def get_training_setup(accelerator, sched=False):
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=80)
    dataloader = DataLoader(dset, batch_size=16)
    model.to(accelerator.device)
    if sched:
        opt = AdamW(params=model.parameters(), lr=1e-3)
        ddp_opt = AdamW(params=ddp_model.parameters(), lr=1e-3)
        sched = LambdaLR(opt, lr_lambda=lambda epoch: epoch**0.65)
        ddp_sched = LambdaLR(ddp_opt, lr_lambda=lambda epoch: epoch**0.65)
    # Make a copy of `model`
    if sched:
        ddp_model, ddp_opt, ddp_sched, dataloader = accelerator.prepare(ddp_model, ddp_opt, ddp_sched, dataloader)
    else:
        ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    if sched:
        return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
    return model, ddp_model, dataloader
def test_noop_sync(accelerator):
    # Test when on a single CPU or GPU that the context manager does nothing
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)

        # Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
        check_model_parameters(model, ddp_model, True, iteration)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            assert torch.allclose(
                param.grad, ddp_param.grad
            ), f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
def test_distributed_sync(accelerator):
    # Test on distributed setup that context manager behaves properly
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)

        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if iteration % 2 == 0:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), f"Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"
            else:
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
def test_gradient_accumulation(split_batches=False, dispatch_batches=False):
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2
    )
    # Test that context manager behaves properly
    model, ddp_model, dataloader = get_training_setup(accelerator)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator, False)
        # Do "gradient accumulation" (noop)
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)

        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if ((iteration + 1) % 2 == 0) or (iteration == len(dataloader) - 1):
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), f"Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"
            else:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), f"Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
    GradientState._reset_state()
def test_gradient_accumulation_with_opt_and_scheduler(split_batches=False, dispatch_batches=False):
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2
    )
    # Test that context manager behaves properly
    model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched = get_training_setup(accelerator, True)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        model.train()
        ddp_model.train()
        step_model(model, input, target, accelerator, False)
        opt.step()
        if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(dataloader)):
            if split_batches:
                sched.step()
            else:
                for _ in range(accelerator.num_processes):
                    sched.step()
        opt.zero_grad()
        # Perform gradient accumulation under wrapper
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
            ddp_opt.step()
            ddp_sched.step()
            ddp_opt.zero_grad()

        # Learning rates should be the same
        assert (
            opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
        ), f'Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]["lr"]}\nDDP opt: {ddp_opt.param_groups[0]["lr"]}\n'
        did_step = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(dataloader))
        if accelerator.num_processes > 1:
            check_model_parameters(model, ddp_model, did_step, iteration)
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
    GradientState._reset_state()
def test_dataloader_break():
    accelerator = Accelerator()

    first_dset = RegressionDataset(length=80)
    first_dataloader = DataLoader(first_dset, batch_size=16)
    second_dset = RegressionDataset(length=96)
    second_dataloader = DataLoader(second_dset, batch_size=16)
    first_dataloader, second_dataloader = accelerator.prepare(first_dataloader, second_dataloader)
    assert accelerator.gradient_state.active_dataloader is None
    for iteration, _ in enumerate(first_dataloader):
        assert id(accelerator.gradient_state.active_dataloader) == id(first_dataloader)
        if iteration < len(first_dataloader) - 1:
            assert not accelerator.gradient_state.end_of_dataloader
            if iteration == 1:
                for batch_num, _ in enumerate(second_dataloader):
                    assert id(accelerator.gradient_state.active_dataloader) == id(second_dataloader)
                    if batch_num < len(second_dataloader) - 1:
                        assert not accelerator.gradient_state.end_of_dataloader
                    else:
                        assert accelerator.gradient_state.end_of_dataloader
        else:
            assert accelerator.gradient_state.end_of_dataloader
    assert accelerator.gradient_state.active_dataloader is None
def main():
    accelerator = Accelerator()
    state = accelerator.state
    if state.local_process_index == 0:
        print("**Test `accumulate` gradient accumulation with dataloader break**")
    test_dataloader_break()
    if state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print("**Test NOOP `no_sync` context manager**")
        test_noop_sync(accelerator)
    if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
        if state.local_process_index == 0:
            print("**Test Distributed `no_sync` context manager**")
        test_distributed_sync(accelerator)
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if state.local_process_index == 0:
                    print(
                        "**Test `accumulate` gradient accumulation, ",
                        f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**",
                    )
                test_gradient_accumulation(split_batch, dispatch_batches)

    # Currently will break on torch 2.0 +, need to investigate why
    if is_torch_version("<", "2.0") or state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print(
                "**Test `accumulate` gradient accumulation with optimizer and scheduler, ",
                "`split_batches=False`, `dispatch_batches=False`**",
            )
        test_gradient_accumulation_with_opt_and_scheduler()
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if not split_batch and not dispatch_batches:
                    continue
                if state.local_process_index == 0:
                    print(
                        "**Test `accumulate` gradient accumulation with optimizer and scheduler, ",
                        f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**",
                    )
                test_gradient_accumulation_with_opt_and_scheduler(split_batch, dispatch_batches)


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
| 287 | 0 |
"""simple docstring"""
import json
import os
import unittest
from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
VOCAB_FILES_NAMES,
GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class GPTSanJapaneseTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = GPTSanJapaneseTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {"do_clean_text": False, "add_prefix_space": False}
    def setUp(self):
        super().setUp()

        # fmt: off
        vocab_tokens = ["こん", "こんに", "にちは", "ばんは", "世界,㔺界", "、", "。", "<BR>", "<SP>", "<TAB>", "<URL>", "<EMAIL>", "<TEL>", "<DATE>", "<PRICE>", "<BLOCK>", "<KIGOU>", "<U2000U2BFF>", "<|emoji1|>", "<unk>", "<|bagoftoken|>", "<|endoftext|>"]
        # fmt: on
        emoji_tokens = {"emoji": {"\ud83d\ude00": "<|emoji1|>"}, "emoji_inv": {"<|emoji1|>": "\ud83d\ude00"}}  # 😀
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.emoji_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["emoji_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
        with open(self.emoji_file, "w") as emoji_writer:
            emoji_writer.write(json.dumps(emoji_tokens))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        input_text = "こんにちは、世界。 \nこんばんは、㔺界。😀"
        output_text = "こんにちは、世界。 \nこんばんは、世界。😀"
        return input_text, output_text
    def get_clean_sequence(self, tokenizer):
        input_text, output_text = self.get_input_output_texts(tokenizer)
        ids = tokenizer.encode(output_text, add_special_tokens=False)
        text = tokenizer.decode(ids, clean_up_tokenization_spaces=False)
        return text, ids
def UpperCamelCase__ ( self ):
"""simple docstring"""
pass # TODO add if relevant
def UpperCamelCase__ ( self ):
"""simple docstring"""
pass # TODO add if relevant
def UpperCamelCase__ ( self ):
"""simple docstring"""
pass # TODO add if relevant
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : str = self.get_tokenizer()
# Testing tokenization
UpperCAmelCase_ : Tuple = "こんにちは、世界。 こんばんは、㔺界。"
UpperCAmelCase_ : Dict = ["こん", "にちは", "、", "世界", "。", "<SP>", "こん", "ばんは", "、", "㔺界", "。"]
UpperCAmelCase_ : int = tokenizer.tokenize(lowercase_ )
self.assertListEqual(lowercase_ , lowercase_ )
# Testing conversion to ids without special tokens
UpperCAmelCase_ : int = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
UpperCAmelCase_ : Optional[Any] = tokenizer.convert_tokens_to_ids(lowercase_ )
self.assertListEqual(lowercase_ , lowercase_ )
# Testing conversion to ids with special tokens
UpperCAmelCase_ : Tuple = tokens + [tokenizer.unk_token]
UpperCAmelCase_ : Optional[int] = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19]
UpperCAmelCase_ : int = tokenizer.convert_tokens_to_ids(lowercase_ )
self.assertListEqual(lowercase_ , lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : List[Any] = self.get_tokenizer()
# Testing tokenization
UpperCAmelCase_ : Optional[int] = "こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。"
UpperCAmelCase_ : Optional[int] = "こんにちは、、、、世界。こんばんは、、、、世界。"
UpperCAmelCase_ : Union[str, Any] = tokenizer.encode(lowercase_ )
UpperCAmelCase_ : Union[str, Any] = tokenizer.decode(lowercase_ )
self.assertEqual(lowercase_ , lowercase_ )
@slow
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : List[Any] = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese" )
# Testing tokenization
UpperCAmelCase_ : List[Any] = "こんにちは、世界。"
UpperCAmelCase_ : List[Any] = "こんばんは、㔺界。😀"
UpperCAmelCase_ : List[Any] = "こんにちは、世界。こんばんは、世界。😀"
UpperCAmelCase_ : Optional[Any] = tokenizer.encode(prefix_text + input_text )
UpperCAmelCase_ : List[str] = tokenizer.encode("" , prefix_text=prefix_text + input_text )
UpperCAmelCase_ : str = tokenizer.encode(lowercase_ , prefix_text=lowercase_ )
UpperCAmelCase_ : List[Any] = tokenizer.decode(lowercase_ )
UpperCAmelCase_ : str = tokenizer.decode(lowercase_ )
UpperCAmelCase_ : List[str] = tokenizer.decode(lowercase_ )
self.assertEqual(lowercase_ , lowercase_ )
self.assertEqual(lowercase_ , lowercase_ )
self.assertEqual(lowercase_ , lowercase_ )
@slow
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Dict = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese" )
# Testing tokenization
UpperCAmelCase_ : Union[str, Any] = "こんにちは、世界。"
UpperCAmelCase_ : Union[str, Any] = "こんばんは、㔺界。😀"
UpperCAmelCase_ : List[Any] = len(tokenizer.encode(lowercase_ ) ) - 2
UpperCAmelCase_ : Dict = len(tokenizer.encode(lowercase_ ) ) - 2
UpperCAmelCase_ : Union[str, Any] = [1] + [0] * (len_prefix + len_text + 1)
UpperCAmelCase_ : Any = [1] * (len_prefix + len_text + 1) + [0]
UpperCAmelCase_ : List[Any] = [1] + [1] * (len_prefix) + [0] * (len_text + 1)
UpperCAmelCase_ : Dict = tokenizer(prefix_text + input_text ).token_type_ids
UpperCAmelCase_ : Optional[Any] = tokenizer("" , prefix_text=prefix_text + input_text ).token_type_ids
UpperCAmelCase_ : str = tokenizer(lowercase_ , prefix_text=lowercase_ ).token_type_ids
self.assertListEqual(lowercase_ , lowercase_ )
self.assertListEqual(lowercase_ , lowercase_ )
self.assertListEqual(lowercase_ , lowercase_ )
@slow
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : List[str] = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese" )
UpperCAmelCase_ : str = tokenizer.encode("あンいワ" )
UpperCAmelCase_ : List[Any] = tokenizer.encode("" , prefix_text="あンいワ" )
UpperCAmelCase_ : str = tokenizer.encode("いワ" , prefix_text="あン" )
self.assertEqual(tokenizer.decode(lowercase_ ) , tokenizer.decode(lowercase_ ) )
self.assertEqual(tokenizer.decode(lowercase_ ) , tokenizer.decode(lowercase_ ) )
self.assertNotEqual(lowercase_ , lowercase_ )
self.assertNotEqual(lowercase_ , lowercase_ )
self.assertEqual(x_token_a[1] , x_token_a[-1] ) # SEG token
self.assertEqual(x_token_a[1] , x_token_a[3] ) # SEG token
@slow
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese" )
UpperCAmelCase_ : Tuple = [["武田信玄", "は、"], ["織田信長", "の配下の、"]]
UpperCAmelCase_ : Dict = tokenizer(lowercase_ , padding=lowercase_ )
UpperCAmelCase_ : int = tokenizer.batch_encode_plus(lowercase_ , padding=lowercase_ )
# fmt: off
        UpperCAmelCase_ : str = [[35993, 8640, 25948, 35998, 30647, 35675, 35999, 35999], [35993, 10382, 9868, 35998, 30646, 9459, 30646, 35675]]
UpperCAmelCase_ : Optional[int] = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
UpperCAmelCase_ : int = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
# fmt: on
self.assertListEqual(x_token.input_ids , lowercase_ )
self.assertListEqual(x_token.token_type_ids , lowercase_ )
self.assertListEqual(x_token.attention_mask , lowercase_ )
self.assertListEqual(x_token_a.input_ids , lowercase_ )
self.assertListEqual(x_token_a.token_type_ids , lowercase_ )
self.assertListEqual(x_token_a.attention_mask , lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
# Intentionally convert some words to accommodate character fluctuations unique to Japanese
pass
def UpperCamelCase__ ( self ):
"""simple docstring"""
# tokenizer has no padding token
pass
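# A hedged sketch of the prefix/text split the tests above verify: the GPT-SAN
# tokenizer accepts a `prefix_text` argument, inserts a SEG token between prefix
# and text, and marks the prefix span with 1s in token_type_ids. Running this
# downloads the Tanrei/GPTSAN-japanese tokenizer files.
from transformers import GPTSanJapaneseTokenizer
tokenizer = GPTSanJapaneseTokenizer.from_pretrained("Tanrei/GPTSAN-japanese")
encoded = tokenizer("いワ", prefix_text="あン")
print(encoded.input_ids)
print(encoded.token_type_ids)  # 1s over the prefix span, 0s over the text span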
| 61 |
from scipy.stats import pearsonr
import datasets
_lowerCamelCase ="""
Pearson correlation coefficient and p-value for testing non-correlation.
The Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.
The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.
"""
_lowerCamelCase ="""
Args:
predictions (`list` of `int`): Predicted class labels, as returned by a model.
references (`list` of `int`): Ground truth labels.
return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.
Returns:
pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.
    p-value (`float`): P-value, which roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.
Examples:
Example 1-A simple example using only predictions and references.
>>> pearsonr_metric = datasets.load_metric(\"pearsonr\")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])
>>> print(round(results['pearsonr'], 2))
-0.74
    Example 2-The same as Example 1, but also returning the `p-value`.
>>> pearsonr_metric = datasets.load_metric(\"pearsonr\")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)
>>> print(sorted(list(results.keys())))
['p-value', 'pearsonr']
>>> print(round(results['pearsonr'], 2))
-0.74
>>> print(round(results['p-value'], 2))
0.15
"""
_lowerCamelCase ="""
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, Ilhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Antonio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class A__ ( datasets.Metric):
def UpperCamelCase__ ( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""float""" ),
"""references""": datasets.Value("""float""" ),
} ) , reference_urls=["""https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html"""] , )
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__=False ):
if return_pvalue:
lowerCamelCase : Optional[Any] = pearsonr(__magic_name__ , __magic_name__ )
return {"pearsonr": results[0], "p-value": results[1]}
else:
return {"pearsonr": float(pearsonr(__magic_name__ , __magic_name__ )[0] )}
| 287 | 0 |
import string
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : str ):
for key in range(len(string.ascii_uppercase ) ):
__UpperCamelCase =''
for symbol in message:
if symbol in string.ascii_uppercase:
__UpperCamelCase =string.ascii_uppercase.find(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =num - key
if num < 0:
__UpperCamelCase =num + len(string.ascii_uppercase )
__UpperCamelCase =translated + string.ascii_uppercase[num]
else:
__UpperCamelCase =translated + symbol
print(F'Decryption using Key #{key}: {translated}' )
def _UpperCAmelCase ( ):
__UpperCamelCase =input('Encrypted message: ' )
__UpperCamelCase =message.upper()
decrypt(SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
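# A self-contained sketch of the brute-force decryption above (re-stated here
# because the obfuscated snippet defines both helpers under the same name).
# "KHOOR" is "HELLO" Caesar-shifted by 3, so the Key #3 line recovers it.
import string
def brute_force_decrypt(message: str) -> None:
    for key in range(len(string.ascii_uppercase)):
        translated = ""
        for symbol in message:
            if symbol in string.ascii_uppercase:
                # shift back by `key`, wrapping around the alphabet
                num = (string.ascii_uppercase.find(symbol) - key) % 26
                translated += string.ascii_uppercase[num]
            else:
                translated += symbol
        print(f"Decryption using Key #{key}: {translated}")
brute_force_decrypt("KHOOR")  # Key #3 prints "HELLO"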
| 62 |
import copy
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
_lowerCamelCase =logging.get_logger(__name__)
_lowerCamelCase ={
"""microsoft/conditional-detr-resnet-50""": (
"""https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json"""
),
}
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : Union[str, Any] = """conditional_detr"""
_UpperCAmelCase : Optional[int] = ["""past_key_values"""]
_UpperCAmelCase : Optional[Any] = {
"""hidden_size""": """d_model""",
"""num_attention_heads""": """encoder_attention_heads""",
}
    def __init__( self , __magic_name__=True , __magic_name__=None , __magic_name__=3 , __magic_name__=300 , __magic_name__=6 , __magic_name__=2_048 , __magic_name__=8 , __magic_name__=6 , __magic_name__=2_048 , __magic_name__=8 , __magic_name__=0.0 , __magic_name__=0.0 , __magic_name__=True , __magic_name__="relu" , __magic_name__=256 , __magic_name__=0.1 , __magic_name__=0.0 , __magic_name__=0.0 , __magic_name__=0.02 , __magic_name__=1.0 , __magic_name__=False , __magic_name__="sine" , __magic_name__="resnet50" , __magic_name__=True , __magic_name__=False , __magic_name__=2 , __magic_name__=5 , __magic_name__=2 , __magic_name__=1 , __magic_name__=1 , __magic_name__=2 , __magic_name__=5 , __magic_name__=2 , __magic_name__=0.25 , **__magic_name__ , ):
if backbone_config is not None and use_timm_backbone:
raise ValueError("""You can't specify both `backbone_config` and `use_timm_backbone`.""" )
if not use_timm_backbone:
if backbone_config is None:
logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" )
lowerCamelCase : Optional[int] = CONFIG_MAPPING["""resnet"""](out_features=["""stage4"""] )
elif isinstance(__magic_name__ , __magic_name__ ):
lowerCamelCase : List[Any] = backbone_config.get("""model_type""" )
lowerCamelCase : Dict = CONFIG_MAPPING[backbone_model_type]
lowerCamelCase : str = config_class.from_dict(__magic_name__ )
lowerCamelCase : Dict = use_timm_backbone
lowerCamelCase : str = backbone_config
lowerCamelCase : Tuple = num_channels
lowerCamelCase : Dict = num_queries
lowerCamelCase : Any = d_model
lowerCamelCase : Optional[Any] = encoder_ffn_dim
lowerCamelCase : List[str] = encoder_layers
lowerCamelCase : Union[str, Any] = encoder_attention_heads
lowerCamelCase : Any = decoder_ffn_dim
lowerCamelCase : Dict = decoder_layers
lowerCamelCase : Union[str, Any] = decoder_attention_heads
lowerCamelCase : Dict = dropout
lowerCamelCase : List[str] = attention_dropout
lowerCamelCase : Union[str, Any] = activation_dropout
lowerCamelCase : Optional[int] = activation_function
lowerCamelCase : int = init_std
lowerCamelCase : str = init_xavier_std
lowerCamelCase : Tuple = encoder_layerdrop
lowerCamelCase : str = decoder_layerdrop
lowerCamelCase : Tuple = encoder_layers
lowerCamelCase : Optional[int] = auxiliary_loss
lowerCamelCase : Optional[Any] = position_embedding_type
lowerCamelCase : Optional[int] = backbone
lowerCamelCase : Union[str, Any] = use_pretrained_backbone
lowerCamelCase : str = dilation
# Hungarian matcher
lowerCamelCase : Optional[Any] = class_cost
lowerCamelCase : Dict = bbox_cost
lowerCamelCase : Tuple = giou_cost
# Loss coefficients
lowerCamelCase : Union[str, Any] = mask_loss_coefficient
lowerCamelCase : Dict = dice_loss_coefficient
lowerCamelCase : Optional[int] = cls_loss_coefficient
lowerCamelCase : Optional[int] = bbox_loss_coefficient
lowerCamelCase : Optional[int] = giou_loss_coefficient
lowerCamelCase : Optional[int] = focal_alpha
super().__init__(is_encoder_decoder=__magic_name__ , **__magic_name__ )
@property
def UpperCamelCase__ ( self ):
return self.encoder_attention_heads
@property
def UpperCamelCase__ ( self ):
return self.d_model
def UpperCamelCase__ ( self ):
lowerCamelCase : Optional[int] = copy.deepcopy(self.__dict__ )
if self.backbone_config is not None:
lowerCamelCase : Optional[int] = self.backbone_config.to_dict()
lowerCamelCase : Optional[Any] = self.__class__.model_type
return output
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : Any = version.parse("""1.11""")
@property
def UpperCamelCase__ ( self ):
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
("""pixel_mask""", {0: """batch"""}),
] )
@property
def UpperCamelCase__ ( self ):
return 1e-5
@property
def UpperCamelCase__ ( self ):
        return 12
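# The `attribute_map` above lets callers read `config.hidden_size` while the
# config actually stores `d_model`. A plain-Python analog of that remapping --
# an illustration of the idea, not the transformers PretrainedConfig code:
class RemappedConfig:
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }
    def __init__(self, d_model: int = 256, encoder_attention_heads: int = 8):
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
    def __getattr__(self, name):
        # Only consulted when normal attribute lookup fails, so stored
        # attributes like d_model still resolve directly.
        mapped = type(self).attribute_map.get(name)
        if mapped is not None:
            return getattr(self, mapped)
        raise AttributeError(name)
config = RemappedConfig()
print(config.hidden_size)  # 256, served from config.d_model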
| 287 | 0 |
'''simple docstring'''
import argparse
import shlex
import runhouse as rh
if __name__ == "__main__":
# Refer to https://runhouse-docs.readthedocs-hosted.com/en/latest/api/python/cluster.html#hardware-setup for cloud access
# setup instructions, if using on-demand hardware
# If user passes --user <user> --host <host> --key_path <key_path> <example> <args>, fill them in as BYO cluster
# If user passes --instance <instance> --provider <provider> <example> <args>, fill them in as on-demand cluster
# Throw an error if user passes both BYO and on-demand cluster args
# Otherwise, use default values
lowerCAmelCase_ : List[str] = argparse.ArgumentParser()
parser.add_argument('--user', type=str, default='ubuntu')
parser.add_argument('--host', type=str, default='localhost')
parser.add_argument('--key_path', type=str, default=None)
parser.add_argument('--instance', type=str, default='V100:1')
parser.add_argument('--provider', type=str, default='cheapest')
parser.add_argument('--use_spot', type=bool, default=False)
parser.add_argument('--example', type=str, default='pytorch/text-generation/run_generation.py')
lowerCAmelCase_ , lowerCAmelCase_ : List[Any] = parser.parse_known_args()
if args.host != "localhost":
if args.instance != "V100:1" or args.provider != "cheapest":
raise ValueError('Cannot specify both BYO and on-demand cluster args')
lowerCAmelCase_ : List[Any] = rh.cluster(
name='rh-cluster', ips=[args.host], ssh_creds={'ssh_user': args.user, 'ssh_private_key': args.key_path}
)
else:
lowerCAmelCase_ : Dict = rh.cluster(
name='rh-cluster', instance_type=args.instance, provider=args.provider, use_spot=args.use_spot
)
lowerCAmelCase_ : Optional[Any] = args.example.rsplit('/', 1)[0]
# Set up remote environment
cluster.install_packages(['pip:./']) # Installs transformers from local source
# Note transformers is copied into the home directory on the remote machine, so we can install from there
cluster.run([f"""pip install -r transformers/examples/{example_dir}/requirements.txt"""])
cluster.run(['pip install torch --upgrade --extra-index-url https://download.pytorch.org/whl/cu117'])
# Run example. You can bypass the CLI wrapper and paste your own code here.
cluster.run([f"""python transformers/examples/{args.example} {' '.join(shlex.quote(arg) for arg in unknown)}"""])
# Alternatively, we can just import and run a training function (especially if there's no wrapper CLI):
# from my_script... import train
# reqs = ['pip:./', 'torch', 'datasets', 'accelerate', 'evaluate', 'tqdm', 'scipy', 'scikit-learn', 'tensorboard']
# launch_train_gpu = rh.function(fn=train,
# system=gpu,
# reqs=reqs,
# name='train_bert_glue')
#
# We can pass in arguments just like we would to a function:
# launch_train_gpu(num_epochs = 3, lr = 2e-5, seed = 42, batch_size = 16,
# stream_logs=True)
| 63 |
import json
import sys
def _a ( lowerCamelCase, lowerCamelCase ):
with open(lowerCamelCase, encoding="""utf-8""" ) as f:
lowerCamelCase : List[Any] = json.load(lowerCamelCase )
lowerCamelCase : Optional[Any] = ["""<details>""", """<summary>Show updated benchmarks!</summary>""", """ """]
for benchmark_name in sorted(lowerCamelCase ):
lowerCamelCase : List[Any] = results[benchmark_name]
lowerCamelCase : Union[str, Any] = benchmark_name.split("""/""" )[-1]
output_md.append(F'''### Benchmark: {benchmark_file_name}''' )
lowerCamelCase : Any = """| metric |"""
lowerCamelCase : str = """|--------|"""
lowerCamelCase : List[Any] = """| new / old (diff) |"""
for metric_name in sorted(lowerCamelCase ):
lowerCamelCase : List[Any] = benchmark_res[metric_name]
lowerCamelCase : Tuple = metric_vals["""new"""]
lowerCamelCase : int = metric_vals.get("""old""", lowerCamelCase )
lowerCamelCase : Dict = metric_vals.get("""diff""", lowerCamelCase )
lowerCamelCase : Dict = F''' {new_val:f}''' if isinstance(lowerCamelCase, (int, float) ) else """None"""
if old_val is not None:
val_str += F''' / {old_val:f}''' if isinstance(lowerCamelCase, (int, float) ) else "None"
if dif_val is not None:
val_str += F''' ({dif_val:f})''' if isinstance(lowerCamelCase, (int, float) ) else "None"
title += " " + metric_name + " |"
lines += "---|"
value += val_str + " |"
output_md += [title, lines, value, " "]
output_md.append("""</details>""" )
with open(lowerCamelCase, """w""", encoding="""utf-8""" ) as f:
f.writelines("""\n""".join(lowerCamelCase ) )
if __name__ == "__main__":
_lowerCamelCase =sys.argv[1]
_lowerCamelCase =sys.argv[2]
format_json_to_md(input_json_file, output_md_file)
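# A hedged usage sketch for the formatter above, assuming the de-obfuscated
# signature `format_json_to_md(input_json_file, output_md_file)` seen in the
# __main__ guard. The JSON layout (benchmark -> metric -> new/old/diff) is
# inferred from the loop bodies; the sample numbers are illustrative.
import tempfile
results = {
    "benchmarks/text_classification": {
        "accuracy": {"new": 0.91, "old": 0.90, "diff": 0.01},
        "samples_per_second": {"new": 120.0},
    }
}
with tempfile.TemporaryDirectory() as tmp:
    json_path = os.path.join(tmp, "results.json") if (import_os := __import__("os")) is None else __import__("os").path.join(tmp, "results.json")
    json_path = __import__("os").path.join(tmp, "results.json")
    md_path = __import__("os").path.join(tmp, "results.md")
    with open(json_path, "w", encoding="utf-8") as f:
        json.dump(results, f)
    format_json_to_md(json_path, md_path)
    with open(md_path, encoding="utf-8") as f:
        print(f.read())  # collapsible Markdown section with one table per benchmark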
| 287 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A_ = logging.get_logger(__name__)
A_ = {
'''microsoft/markuplm-base''': '''https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json''',
'''microsoft/markuplm-large''': '''https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json''',
}
class lowercase( __a ):
'''simple docstring'''
lowercase__ = "markuplm"
def __init__( self: Optional[Any], a_: List[str]=30_522, a_: List[str]=768, a_: Optional[int]=12, a_: List[str]=12, a_: Optional[int]=3_072, a_: str="gelu", a_: str=0.1, a_: int=0.1, a_: Dict=512, a_: List[Any]=2, a_: List[Any]=0.02, a_: Optional[Any]=1E-12, a_: Any=0, a_: Union[str, Any]=0, a_: int=2, a_: Union[str, Any]=256, a_: Tuple=1_024, a_: str=216, a_: str=1_001, a_: str=32, a_: Optional[Any]=50, a_: List[str]="absolute", a_: Dict=True, a_: int=None, **a_: Any, ):
'''simple docstring'''
super().__init__(
pad_token_id=a_, bos_token_id=a_, eos_token_id=a_, **a_, )
_snake_case : Optional[Any] = vocab_size
_snake_case : Optional[Any] = hidden_size
_snake_case : Union[str, Any] = num_hidden_layers
_snake_case : Tuple = num_attention_heads
_snake_case : Optional[Any] = hidden_act
_snake_case : List[Any] = intermediate_size
_snake_case : List[str] = hidden_dropout_prob
_snake_case : int = attention_probs_dropout_prob
_snake_case : Tuple = max_position_embeddings
_snake_case : str = type_vocab_size
_snake_case : Any = initializer_range
_snake_case : Any = layer_norm_eps
_snake_case : str = position_embedding_type
_snake_case : List[Any] = use_cache
_snake_case : Any = classifier_dropout
# additional properties
_snake_case : Dict = max_depth
_snake_case : str = max_xpath_tag_unit_embeddings
_snake_case : str = max_xpath_subs_unit_embeddings
_snake_case : Union[str, Any] = tag_pad_id
_snake_case : int = subs_pad_id
_snake_case : List[Any] = xpath_unit_hidden_size
| 64 |
def _a ( lowerCamelCase ):
return " ".join(
"""""".join(word[::-1] ) if len(lowerCamelCase ) > 4 else word for word in sentence.split() )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(reverse_long_words("""Hey wollef sroirraw"""))
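# A quick self-contained check of the helper above: words longer than four
# characters are reversed in place, shorter words are left untouched.
def reverse_long_words(sentence: str) -> str:
    return " ".join(word[::-1] if len(word) > 4 else word for word in sentence.split())
assert reverse_long_words("Hey wollef sroirraw") == "Hey fellow warriors"
print(reverse_long_words("Hey wollef sroirraw"))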
| 287 | 0 |
import builtins
import sys
from ...utils.imports import _is_package_available
from . import cursor, input
from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor
from .keymap import KEYMAP
UpperCamelCase__ = False
try:
UpperCamelCase__ = _is_package_available('google.colab')
except ModuleNotFoundError:
pass
@input.register
class A :
def __init__(self : Optional[int] , __UpperCAmelCase : str = None , __UpperCAmelCase : list = [] ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase__ = 0
UpperCAmelCase__ = choices
UpperCAmelCase__ = prompt
if sys.platform == "win32":
UpperCAmelCase__ = "*"
else:
UpperCAmelCase__ = "➔ "
def lowercase_ (self : Optional[int] , __UpperCAmelCase : List[Any] , __UpperCAmelCase : str = "" ) -> Optional[int]:
"""simple docstring"""
if sys.platform != "win32":
writeColor(self.choices[index] , 3_2 , __UpperCAmelCase )
else:
forceWrite(self.choices[index] , __UpperCAmelCase )
def lowercase_ (self : Union[str, Any] , __UpperCAmelCase : int ) -> Dict:
"""simple docstring"""
if index == self.position:
forceWrite(f""" {self.arrow_char} """ )
self.write_choice(__UpperCAmelCase )
else:
forceWrite(f""" {self.choices[index]}""" )
reset_cursor()
def lowercase_ (self : Dict , __UpperCAmelCase : Direction , __UpperCAmelCase : int = 1 ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase__ = self.position
if direction == Direction.DOWN:
if self.position + 1 >= len(self.choices ):
return
self.position += num_spaces
else:
if self.position - 1 < 0:
return
self.position -= num_spaces
clear_line()
self.print_choice(__UpperCAmelCase )
move_cursor(__UpperCAmelCase , direction.name )
self.print_choice(self.position )
@input.mark(KEYMAP["up"] )
def lowercase_ (self : Tuple ) -> Optional[int]:
"""simple docstring"""
self.move_direction(Direction.UP )
@input.mark(KEYMAP["down"] )
def lowercase_ (self : Any ) -> Optional[int]:
"""simple docstring"""
self.move_direction(Direction.DOWN )
@input.mark(KEYMAP["newline"] )
def lowercase_ (self : List[str] ) -> Optional[Any]:
"""simple docstring"""
move_cursor(len(self.choices ) - self.position , "DOWN" )
return self.position
@input.mark(KEYMAP["interrupt"] )
def lowercase_ (self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
move_cursor(len(self.choices ) - self.position , "DOWN" )
raise KeyboardInterrupt
@input.mark_multiple(*[KEYMAP[str(__UpperCAmelCase )] for number in range(1_0 )] )
def lowercase_ (self : Union[str, Any] ) -> str:
"""simple docstring"""
UpperCAmelCase__ = int(chr(self.current_selection ) )
UpperCAmelCase__ = index - self.position
if index == self.position:
return
if index < len(self.choices ):
if self.position > index:
self.move_direction(Direction.UP , -movement )
elif self.position < index:
self.move_direction(Direction.DOWN , __UpperCAmelCase )
else:
return
else:
return
def lowercase_ (self : Tuple , __UpperCAmelCase : int = 0 ) -> Union[str, Any]:
"""simple docstring"""
if self.prompt:
linebreak()
forceWrite(self.prompt , "\n" )
if in_colab:
forceWrite("Please input a choice index (starting from 0), and press enter" , "\n" )
else:
forceWrite("Please select a choice using the arrow or number keys, and selecting with enter" , "\n" )
UpperCAmelCase__ = default_choice
for i in range(len(self.choices ) ):
self.print_choice(__UpperCAmelCase )
forceWrite("\n" )
move_cursor(len(self.choices ) - self.position , "UP" )
with cursor.hide():
while True:
if in_colab:
try:
UpperCAmelCase__ = int(builtins.input() )
except ValueError:
UpperCAmelCase__ = default_choice
else:
UpperCAmelCase__ = self.handle_input()
if choice is not None:
reset_cursor()
for _ in range(len(self.choices ) + 1 ):
move_cursor(1 , "UP" )
clear_line()
self.write_choice(__UpperCAmelCase , "\n" )
return choice
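# A hedged usage sketch for the interactive menu above. The method names in this
# snippet are mangled; in upstream accelerate the class is `BulletMenu` and the
# entry point is `run(default_choice=0)`, which blocks on key presses in a
# terminal and returns the index of the selected choice. Import path assumed.
if __name__ == "__main__":
    from accelerate.commands.menu import BulletMenu  # assumed upstream export
    menu = BulletMenu("Which mixed precision mode?", ["no", "fp16", "bf16"])
    selected = menu.run(default_choice=0)
    print(f"You picked option #{selected}")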
| 65 |
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from packaging import version
from .. import __version__
from .constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD
from .doc import (
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
copy_func,
replace_return_docstrings,
)
from .generic import (
ContextManagers,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
TensorType,
add_model_info_to_auto_map,
cached_property,
can_return_loss,
expand_dims,
find_labels,
flatten_dict,
infer_framework,
is_jax_tensor,
is_numpy_array,
is_tensor,
is_tf_symbolic_tensor,
is_tf_tensor,
is_torch_device,
is_torch_dtype,
is_torch_tensor,
reshape,
squeeze,
strtobool,
tensor_size,
to_numpy,
to_py_obj,
transpose,
working_or_temp_dir,
)
from .hub import (
CLOUDFRONT_DISTRIB_PREFIX,
DISABLE_TELEMETRY,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
EntryNotFoundError,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
cached_file,
default_cache_path,
define_sagemaker_information,
download_url,
extract_commit_hash,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
has_file,
http_user_agent,
is_offline_mode,
is_remote_url,
move_cache,
send_example_telemetry,
try_to_load_from_cache,
)
from .import_utils import (
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
TORCH_FX_REQUIRED_VERSION,
USE_JAX,
USE_TF,
USE_TORCH,
DummyObject,
OptionalDependencyNotAvailable,
_LazyModule,
ccl_version,
direct_transformers_import,
get_torch_version,
is_accelerate_available,
is_apex_available,
is_bitsandbytes_available,
is_bsa_available,
is_coloredlogs_available,
is_cython_available,
is_datasets_available,
is_decord_available,
is_detectrona_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_jieba_available,
is_jumanpp_available,
is_kenlm_available,
is_keras_nlp_available,
is_librosa_available,
is_natten_available,
is_ninja_available,
is_onnx_available,
is_openai_available,
is_optimum_available,
is_pandas_available,
is_peft_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytest_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sacremoses_available,
is_safetensors_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_sudachi_available,
is_tensorflow_probability_available,
is_tensorflow_text_available,
is_tfaonnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bfaa_available,
is_torch_bfaa_cpu_available,
is_torch_bfaa_gpu_available,
is_torch_compile_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_neuroncore_available,
is_torch_tensorrt_fx_available,
is_torch_tfaa_available,
is_torch_tpu_available,
is_torchaudio_available,
is_torchdistx_available,
is_torchdynamo_available,
is_torchvision_available,
is_training_run_on_sagemaker,
is_vision_available,
requires_backends,
torch_only_method,
)
_lowerCamelCase ="""pytorch_model.bin"""
_lowerCamelCase ="""pytorch_model.bin.index.json"""
_lowerCamelCase ="""adapter_config.json"""
_lowerCamelCase ="""adapter_model.bin"""
_lowerCamelCase ="""adapter_model.safetensors"""
_lowerCamelCase ="""tf_model.h5"""
_lowerCamelCase ="""tf_model.h5.index.json"""
_lowerCamelCase ="""model.ckpt"""
_lowerCamelCase ="""flax_model.msgpack"""
_lowerCamelCase ="""flax_model.msgpack.index.json"""
_lowerCamelCase ="""model.safetensors"""
_lowerCamelCase ="""model.safetensors.index.json"""
_lowerCamelCase ="""config.json"""
_lowerCamelCase ="""preprocessor_config.json"""
_lowerCamelCase =FEATURE_EXTRACTOR_NAME
_lowerCamelCase ="""generation_config.json"""
_lowerCamelCase ="""modelcard.json"""
_lowerCamelCase ="""▁"""
_lowerCamelCase =SENTENCEPIECE_UNDERLINE # Kept for backward compatibility
_lowerCamelCase =[
[[0, 1, 0, 1], [1, 0, 0, 1]]
] * 2 # Needs to have 0s and 1s only since XLM uses it for langs too.
_lowerCamelCase =[[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]]
_lowerCamelCase =[[1, 1, 1, 1, 1], [1, 1, 1, 0, 0], [0, 0, 0, 1, 1]]
def _a ( lowerCamelCase ):
if version.parse(lowerCamelCase ) < version.parse(lowerCamelCase ):
if "dev" in min_version:
lowerCamelCase : Optional[int] = (
"""This example requires a source install from HuggingFace Transformers (see """
"""`https://huggingface.co/docs/transformers/installation#install-from-source`),"""
)
else:
lowerCamelCase : int = F'''This example requires a minimum version of {min_version},'''
error_message += F''' but the version found is {__version__}.\n'''
raise ImportError(
error_message
+ """Check out https://github.com/huggingface/transformers/tree/main/examples#important-note for the examples corresponding to other """
"""versions of HuggingFace Transformers.""" )
| 287 | 0 |
"""simple docstring"""
__a = "\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"
__a = [{"type": "code", "content": INSTALL_CONTENT}]
__a = {
"{processor_class}": "FakeProcessorClass",
"{model_class}": "FakeModelClass",
"{object_class}": "FakeObjectClass",
}
| 66 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowerCamelCase =logging.get_logger(__name__)
_lowerCamelCase ={
"""camembert-base""": """https://huggingface.co/camembert-base/resolve/main/config.json""",
"""umberto-commoncrawl-cased-v1""": (
"""https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json"""
),
"""umberto-wikipedia-uncased-v1""": (
"""https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json"""
),
}
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : Tuple = """camembert"""
    def __init__( self , __magic_name__=30_522 , __magic_name__=768 , __magic_name__=12 , __magic_name__=12 , __magic_name__=3_072 , __magic_name__="gelu" , __magic_name__=0.1 , __magic_name__=0.1 , __magic_name__=512 , __magic_name__=2 , __magic_name__=0.02 , __magic_name__=1e-12 , __magic_name__=1 , __magic_name__=0 , __magic_name__=2 , __magic_name__="absolute" , __magic_name__=True , __magic_name__=None , **__magic_name__ , ):
super().__init__(pad_token_id=__magic_name__ , bos_token_id=__magic_name__ , eos_token_id=__magic_name__ , **__magic_name__ )
lowerCamelCase : int = vocab_size
lowerCamelCase : int = hidden_size
lowerCamelCase : int = num_hidden_layers
lowerCamelCase : int = num_attention_heads
lowerCamelCase : Optional[int] = hidden_act
lowerCamelCase : List[Any] = intermediate_size
lowerCamelCase : Tuple = hidden_dropout_prob
lowerCamelCase : Optional[int] = attention_probs_dropout_prob
lowerCamelCase : Optional[int] = max_position_embeddings
lowerCamelCase : str = type_vocab_size
lowerCamelCase : Optional[Any] = initializer_range
lowerCamelCase : int = layer_norm_eps
lowerCamelCase : Any = position_embedding_type
lowerCamelCase : Optional[int] = use_cache
lowerCamelCase : Union[str, Any] = classifier_dropout
class A__ ( __SCREAMING_SNAKE_CASE):
@property
def UpperCamelCase__ ( self ):
if self.task == "multiple-choice":
lowerCamelCase : List[str] = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
lowerCamelCase : List[str] = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
] )
| 287 | 0 |
'''simple docstring'''
from typing import Any, Callable, Dict, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
__UpperCAmelCase ="CompVis/stable-diffusion-v1-1"
__UpperCAmelCase ="CompVis/stable-diffusion-v1-2"
__UpperCAmelCase ="CompVis/stable-diffusion-v1-3"
__UpperCAmelCase ="CompVis/stable-diffusion-v1-4"
class a__ ( UpperCAmelCase__ ):
def __init__( self : Optional[Any] , a : AutoencoderKL , a : CLIPTextModel , a : CLIPTokenizer , a : UNetaDConditionModel , a : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , a : StableDiffusionSafetyChecker , a : CLIPImageProcessor , a : bool = True , ):
"""simple docstring"""
        super().__init__()
__lowerCamelCase = StableDiffusionPipeline.from_pretrained(a )
__lowerCamelCase = StableDiffusionPipeline.from_pretrained(a )
__lowerCamelCase = StableDiffusionPipeline.from_pretrained(a )
__lowerCamelCase = StableDiffusionPipeline(
vae=a , text_encoder=a , tokenizer=a , unet=a , scheduler=a , safety_checker=a , feature_extractor=a , requires_safety_checker=a , )
self.register_modules(pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea )
@property
def SCREAMING_SNAKE_CASE__ ( self : Dict ):
"""simple docstring"""
return {k: getattr(self , a ) for k in self.config.keys() if not k.startswith('''_''' )}
def SCREAMING_SNAKE_CASE__ ( self : List[Any] , a : Optional[Union[str, int]] = "auto" ):
"""simple docstring"""
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
__lowerCamelCase = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(a )
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ):
"""simple docstring"""
self.enable_attention_slicing(a )
@torch.no_grad()
def SCREAMING_SNAKE_CASE__ ( self : Any , a : Union[str, List[str]] , a : int = 5_12 , a : int = 5_12 , a : int = 50 , a : float = 7.5 , a : Optional[Union[str, List[str]]] = None , a : Optional[int] = 1 , a : float = 0.0 , a : Optional[torch.Generator] = None , a : Optional[torch.FloatTensor] = None , a : Optional[str] = "pil" , a : bool = True , a : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , a : int = 1 , **a : Union[str, Any] , ):
"""simple docstring"""
return self.pipea(
prompt=a , height=a , width=a , num_inference_steps=a , guidance_scale=a , negative_prompt=a , num_images_per_prompt=a , eta=a , generator=a , latents=a , output_type=a , return_dict=a , callback=a , callback_steps=a , **a , )
@torch.no_grad()
def SCREAMING_SNAKE_CASE__ ( self : List[str] , a : Union[str, List[str]] , a : int = 5_12 , a : int = 5_12 , a : int = 50 , a : float = 7.5 , a : Optional[Union[str, List[str]]] = None , a : Optional[int] = 1 , a : float = 0.0 , a : Optional[torch.Generator] = None , a : Optional[torch.FloatTensor] = None , a : Optional[str] = "pil" , a : bool = True , a : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , a : int = 1 , **a : Tuple , ):
"""simple docstring"""
return self.pipea(
prompt=a , height=a , width=a , num_inference_steps=a , guidance_scale=a , negative_prompt=a , num_images_per_prompt=a , eta=a , generator=a , latents=a , output_type=a , return_dict=a , callback=a , callback_steps=a , **a , )
@torch.no_grad()
def SCREAMING_SNAKE_CASE__ ( self : List[str] , a : Union[str, List[str]] , a : int = 5_12 , a : int = 5_12 , a : int = 50 , a : float = 7.5 , a : Optional[Union[str, List[str]]] = None , a : Optional[int] = 1 , a : float = 0.0 , a : Optional[torch.Generator] = None , a : Optional[torch.FloatTensor] = None , a : Optional[str] = "pil" , a : bool = True , a : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , a : int = 1 , **a : str , ):
"""simple docstring"""
return self.pipea(
prompt=a , height=a , width=a , num_inference_steps=a , guidance_scale=a , negative_prompt=a , num_images_per_prompt=a , eta=a , generator=a , latents=a , output_type=a , return_dict=a , callback=a , callback_steps=a , **a , )
@torch.no_grad()
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , a : Union[str, List[str]] , a : int = 5_12 , a : int = 5_12 , a : int = 50 , a : float = 7.5 , a : Optional[Union[str, List[str]]] = None , a : Optional[int] = 1 , a : float = 0.0 , a : Optional[torch.Generator] = None , a : Optional[torch.FloatTensor] = None , a : Optional[str] = "pil" , a : bool = True , a : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , a : int = 1 , **a : Tuple , ):
"""simple docstring"""
return self.pipea(
prompt=a , height=a , width=a , num_inference_steps=a , guidance_scale=a , negative_prompt=a , num_images_per_prompt=a , eta=a , generator=a , latents=a , output_type=a , return_dict=a , callback=a , callback_steps=a , **a , )
@torch.no_grad()
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , a : Union[str, List[str]] , a : int = 5_12 , a : int = 5_12 , a : int = 50 , a : float = 7.5 , a : Optional[Union[str, List[str]]] = None , a : Optional[int] = 1 , a : float = 0.0 , a : Optional[torch.Generator] = None , a : Optional[torch.FloatTensor] = None , a : Optional[str] = "pil" , a : bool = True , a : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , a : int = 1 , **a : str , ):
"""simple docstring"""
__lowerCamelCase = '''cuda''' if torch.cuda.is_available() else '''cpu'''
self.to(a )
# Checks if the height and width are divisible by 8 or not
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f"""`height` and `width` must be divisible by 8 but are {height} and {width}.""" )
# Get first result from Stable Diffusion Checkpoint v1.1
__lowerCamelCase = self.textaimg_sda_a(
prompt=a , height=a , width=a , num_inference_steps=a , guidance_scale=a , negative_prompt=a , num_images_per_prompt=a , eta=a , generator=a , latents=a , output_type=a , return_dict=a , callback=a , callback_steps=a , **a , )
# Get first result from Stable Diffusion Checkpoint v1.2
__lowerCamelCase = self.textaimg_sda_a(
prompt=a , height=a , width=a , num_inference_steps=a , guidance_scale=a , negative_prompt=a , num_images_per_prompt=a , eta=a , generator=a , latents=a , output_type=a , return_dict=a , callback=a , callback_steps=a , **a , )
# Get first result from Stable Diffusion Checkpoint v1.3
__lowerCamelCase = self.textaimg_sda_a(
prompt=a , height=a , width=a , num_inference_steps=a , guidance_scale=a , negative_prompt=a , num_images_per_prompt=a , eta=a , generator=a , latents=a , output_type=a , return_dict=a , callback=a , callback_steps=a , **a , )
# Get first result from Stable Diffusion Checkpoint v1.4
__lowerCamelCase = self.textaimg_sda_a(
prompt=a , height=a , width=a , num_inference_steps=a , guidance_scale=a , negative_prompt=a , num_images_per_prompt=a , eta=a , generator=a , latents=a , output_type=a , return_dict=a , callback=a , callback_steps=a , **a , )
# Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result
return StableDiffusionPipelineOutput([resa[0], resa[0], resa[0], resa[0]] )
| 67 |
_lowerCamelCase ={
"joule": 1.0,
"kilojoule": 1_0_0_0,
"megajoule": 1_0_0_0_0_0_0,
"gigajoule": 1_0_0_0_0_0_0_0_0_0,
"wattsecond": 1.0,
"watthour": 3_6_0_0,
"kilowatthour": 3_6_0_0_0_0_0,
"newtonmeter": 1.0,
"calorie_nutr": 4_1_8_6.8,
"kilocalorie_nutr": 4_1_8_6_8_0_0.0_0,
"electronvolt": 1.6_0_2_1_7_6_6_3_4E-1_9,
"britishthermalunit_it": 1_0_5_5.0_5_5_8_5,
"footpound": 1.355818,
}
def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase ):
if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
lowerCamelCase : Dict = (
F'''Incorrect \'from_type\' or \'to_type\' value: {from_type!r}, {to_type!r}\n'''
F'''Valid values are: {", ".join(lowerCamelCase )}'''
)
raise ValueError(lowerCamelCase )
return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]
if __name__ == "__main__":
import doctest
doctest.testmod()
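# A self-contained spot check of the conversion rule above: the result is
# value * table[from_type] / table[to_type], with every unit expressed in
# joules. Only a few entries are reproduced here for the check.
ENERGY = {"joule": 1.0, "kilowatthour": 3_600_000, "calorie_nutr": 4186.8}
def convert_energy(from_type: str, to_type: str, value: float) -> float:
    return value * ENERGY[from_type] / ENERGY[to_type]
assert convert_energy("kilowatthour", "joule", 1) == 3_600_000.0
assert round(convert_energy("joule", "calorie_nutr", 4186.8), 9) == 1.0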
| 287 | 0 |
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
lowerCAmelCase__ = """pt"""
elif is_tf_available():
lowerCAmelCase__ = """tf"""
else:
lowerCAmelCase__ = """jax"""
class a__ ( snake_case , unittest.TestCase ):
"""simple docstring"""
__lowerCamelCase = PerceiverTokenizer
__lowerCamelCase = False
def UpperCamelCase ( self ) -> Any:
'''simple docstring'''
super().setUp()
A__ = PerceiverTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def UpperCamelCase ( self ) -> Any:
'''simple docstring'''
return PerceiverTokenizer.from_pretrained("deepmind/language-perceiver" )
def UpperCamelCase ( self , **lowercase ) -> PerceiverTokenizer:
'''simple docstring'''
return self.tokenizer_class.from_pretrained(self.tmpdirname , **lowercase )
def UpperCamelCase ( self , lowercase , lowercase=False , lowercase=20 , lowercase=5 ) -> Tuple[str, list]:
'''simple docstring'''
A__ = []
for i in range(len(lowercase ) ):
try:
A__ = tokenizer.decode([i] , clean_up_tokenization_spaces=lowercase )
except UnicodeDecodeError:
pass
toks.append((i, tok) )
A__ = list(filter(lambda lowercase : re.match(R"^[ a-zA-Z]+$" , t[1] ) , lowercase ) )
A__ = list(filter(lambda lowercase : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=lowercase ) , lowercase ) )
if max_length is not None and len(lowercase ) > max_length:
A__ = toks[:max_length]
if min_length is not None and len(lowercase ) < min_length and len(lowercase ) > 0:
while len(lowercase ) < min_length:
A__ = toks + toks
# toks_str = [t[1] for t in toks]
A__ = [t[0] for t in toks]
# Ensure consistency
A__ = tokenizer.decode(lowercase , clean_up_tokenization_spaces=lowercase )
if " " not in output_txt and len(lowercase ) > 1:
A__ = (
tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=lowercase )
+ " "
+ tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=lowercase )
)
if with_prefix_space:
A__ = " " + output_txt
A__ = tokenizer.encode(lowercase , add_special_tokens=lowercase )
return output_txt, output_ids
def UpperCamelCase ( self ) -> int:
'''simple docstring'''
A__ = self.perceiver_tokenizer
A__ = "Unicode €."
A__ = tokenizer(lowercase )
A__ = [4, 91, 116, 111, 105, 117, 106, 107, 38, 232, 136, 178, 52, 5]
self.assertEqual(encoded["input_ids"] , lowercase )
# decoding
A__ = tokenizer.decode(lowercase )
self.assertEqual(lowercase , "[CLS]Unicode €.[SEP]" )
A__ = tokenizer("e è é ê ë" )
A__ = [4, 107, 38, 201, 174, 38, 201, 175, 38, 201, 176, 38, 201, 177, 5]
self.assertEqual(encoded["input_ids"] , lowercase )
# decoding
A__ = tokenizer.decode(lowercase )
self.assertEqual(lowercase , "[CLS]e è é ê ë[SEP]" )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode("e è é ê ë" ) ) , "[CLS]e è é ê ë[SEP]" )
def UpperCamelCase ( self ) -> Optional[Any]:
'''simple docstring'''
A__ = self.perceiver_tokenizer
A__ = ["A long paragraph for summarization.", "Another paragraph for summarization."]
# fmt: off
A__ = [4, 71, 38, 114, 117, 116, 109, 38, 118, 103, 120, 103, 109, 120, 103, 118, 110, 38, 108, 117, 120, 38, 121, 123, 115, 115, 103, 120, 111, 128, 103, 122, 111, 117, 116, 52, 5, 0]
# fmt: on
A__ = tokenizer(lowercase , padding=lowercase , return_tensors=lowercase )
self.assertIsInstance(lowercase , lowercase )
if FRAMEWORK != "jax":
A__ = list(batch.input_ids.numpy()[0] )
else:
A__ = list(batch.input_ids.tolist()[0] )
self.assertListEqual(lowercase , lowercase )
self.assertEqual((2, 38) , batch.input_ids.shape )
self.assertEqual((2, 38) , batch.attention_mask.shape )
def UpperCamelCase ( self ) -> Optional[Any]:
'''simple docstring'''
A__ = self.perceiver_tokenizer
A__ = ["A long paragraph for summarization.", "Another paragraph for summarization."]
A__ = tokenizer(lowercase , padding=lowercase , return_tensors=lowercase )
# check if input_ids are returned and no decoder_input_ids
self.assertIn("input_ids" , lowercase )
self.assertIn("attention_mask" , lowercase )
self.assertNotIn("decoder_input_ids" , lowercase )
self.assertNotIn("decoder_attention_mask" , lowercase )
def UpperCamelCase ( self ) -> List[str]:
'''simple docstring'''
A__ = self.perceiver_tokenizer
A__ = [
"Summary of the text.",
"Another summary.",
]
A__ = tokenizer(
text_target=lowercase , max_length=32 , padding="max_length" , truncation=lowercase , return_tensors=lowercase )
self.assertEqual(32 , targets["input_ids"].shape[1] )
def UpperCamelCase ( self ) -> Tuple:
'''simple docstring'''
A__ = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
A__ = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
# Isolate this from the other tests because we save additional tokens/etc
A__ = tempfile.mkdtemp()
A__ = " He is very happy, UNwant\u00E9d,running"
A__ = tokenizer.encode(lowercase , add_special_tokens=lowercase )
tokenizer.save_pretrained(lowercase )
A__ = tokenizer.__class__.from_pretrained(lowercase )
A__ = after_tokenizer.encode(lowercase , add_special_tokens=lowercase )
self.assertListEqual(lowercase , lowercase )
shutil.rmtree(lowercase )
A__ = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
# Isolate this from the other tests because we save additional tokens/etc
A__ = tempfile.mkdtemp()
A__ = " He is very happy, UNwant\u00E9d,running"
tokenizer.add_tokens(["bim", "bambam"] )
A__ = tokenizer.additional_special_tokens
additional_special_tokens.append("new_additional_special_token" )
tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens} )
A__ = tokenizer.encode(lowercase , add_special_tokens=lowercase )
tokenizer.save_pretrained(lowercase )
A__ = tokenizer.__class__.from_pretrained(lowercase )
A__ = after_tokenizer.encode(lowercase , add_special_tokens=lowercase )
self.assertListEqual(lowercase , lowercase )
self.assertIn("new_additional_special_token" , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
A__ = tokenizer.__class__.from_pretrained(lowercase , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(lowercase )
def UpperCamelCase ( self ) -> str:
'''simple docstring'''
A__ = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(lowercase )
with open(os.path.join(lowercase , "special_tokens_map.json" ) , encoding="utf-8" ) as json_file:
A__ = json.load(lowercase )
with open(os.path.join(lowercase , "tokenizer_config.json" ) , encoding="utf-8" ) as json_file:
A__ = json.load(lowercase )
A__ = [F'<extra_id_{i}>' for i in range(125 )]
A__ = added_tokens_extra_ids + [
"an_additional_special_token"
]
A__ = added_tokens_extra_ids + [
"an_additional_special_token"
]
with open(os.path.join(lowercase , "special_tokens_map.json" ) , "w" , encoding="utf-8" ) as outfile:
json.dump(lowercase , lowercase )
with open(os.path.join(lowercase , "tokenizer_config.json" ) , "w" , encoding="utf-8" ) as outfile:
json.dump(lowercase , lowercase )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
A__ = tokenizer_class.from_pretrained(
lowercase , )
self.assertIn(
"an_additional_special_token" , tokenizer_without_change_in_init.additional_special_tokens )
self.assertEqual(
["an_additional_special_token"] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(["an_additional_special_token"] ) ) , )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
A__ = added_tokens_extra_ids + [AddedToken("a_new_additional_special_token" , lstrip=lowercase )]
A__ = tokenizer_class.from_pretrained(
lowercase , additional_special_tokens=lowercase , )
self.assertIn("a_new_additional_special_token" , tokenizer.additional_special_tokens )
self.assertEqual(
["a_new_additional_special_token"] , tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(["a_new_additional_special_token"] ) ) , )
def UpperCamelCase ( self ) -> str:
'''simple docstring'''
A__ = self.perceiver_tokenizer
self.assertEqual(tokenizer.decode([178] ) , "�" )
def UpperCamelCase ( self ) -> Optional[Any]:
'''simple docstring'''
pass
def UpperCamelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
pass
def UpperCamelCase ( self ) -> int:
'''simple docstring'''
pass
def UpperCamelCase ( self ) -> Optional[int]:
'''simple docstring'''
pass
def UpperCamelCase ( self ) -> Dict:
'''simple docstring'''
A__ = self.get_tokenizers(fast=lowercase , do_lower_case=lowercase )
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
A__ = ["[CLS]", "t", "h", "i", "s", " ", "i", "s", " ", "a", " ", "t", "e", "s", "t", "[SEP]"]
A__ = tokenizer.convert_tokens_to_string(lowercase )
self.assertIsInstance(lowercase , lowercase )
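# A hedged sketch of the byte-level scheme the tests above rely on: the
# Perceiver tokenizer maps UTF-8 bytes to ids (offset past the special tokens)
# and wraps inputs in [CLS]/[SEP]. Running this downloads the tokenizer files.
from transformers import PerceiverTokenizer
tokenizer = PerceiverTokenizer.from_pretrained("deepmind/language-perceiver")
encoded = tokenizer("Unicode €.")
print(encoded.input_ids)                    # [4, 91, 116, ..., 52, 5] as in the test above
print(tokenizer.decode(encoded.input_ids))  # "[CLS]Unicode €.[SEP]"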
| 68 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, Features, Value
from .base import TaskTemplate
@dataclass(frozen=__SCREAMING_SNAKE_CASE)
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : str = field(default="""automatic-speech-recognition""" , metadata={"""include_in_asdict_even_if_is_default""": True})
_UpperCAmelCase : ClassVar[Features] = Features({"""audio""": Audio()})
_UpperCAmelCase : ClassVar[Features] = Features({"""transcription""": Value("""string""")})
_UpperCAmelCase : str = "audio"
_UpperCAmelCase : str = "transcription"
def UpperCamelCase__ ( self , __magic_name__ ):
if self.audio_column not in features:
raise ValueError(F'''Column {self.audio_column} is not present in features.''' )
if not isinstance(features[self.audio_column] , __magic_name__ ):
raise ValueError(F'''Column {self.audio_column} is not an Audio type.''' )
lowerCamelCase : Optional[Any] = copy.deepcopy(self )
lowerCamelCase : List[Any] = self.input_schema.copy()
lowerCamelCase : Tuple = features[self.audio_column]
lowerCamelCase : int = input_schema
return task_template
@property
def UpperCamelCase__ ( self ):
return {self.audio_column: "audio", self.transcription_column: "transcription"}
| 287 | 0 |
"""simple docstring"""
import math
import sys
def UpperCAmelCase ( UpperCAmelCase ) -> int:
if number != int(UpperCAmelCase ):
raise ValueError('the value of input must be a natural number' )
if number < 0:
raise ValueError('the value of input must not be a negative number' )
if number == 0:
return 1
snake_case_ = [-1] * (number + 1)
snake_case_ = 0
for i in range(1 , number + 1 ):
snake_case_ = sys.maxsize
snake_case_ = int(math.sqrt(UpperCAmelCase ) )
for j in range(1 , root + 1 ):
snake_case_ = 1 + answers[i - (j**2)]
snake_case_ = min(UpperCAmelCase , UpperCAmelCase )
snake_case_ = answer
return answers[number]
if __name__ == "__main__":
import doctest
doctest.testmod()
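# A self-contained spot check of the dynamic program above: it returns the
# minimum number of perfect squares summing to n (by Lagrange's four-square
# theorem the answer is always at most 4).
import math
def min_squares(n: int) -> int:
    answers = [0] + [math.inf] * n
    for i in range(1, n + 1):
        for j in range(1, math.isqrt(i) + 1):
            answers[i] = min(answers[i], 1 + answers[i - j * j])
    return int(answers[n])
assert min_squares(12) == 3  # 4 + 4 + 4
assert min_squares(13) == 2  # 4 + 9
assert min_squares(25) == 1  # 25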
| 69 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCamelCase =logging.get_logger(__name__)
_lowerCamelCase ={
"""BridgeTower/bridgetower-base""": """https://huggingface.co/BridgeTower/bridgetower-base/blob/main/config.json""",
"""BridgeTower/bridgetower-base-itm-mlm""": (
"""https://huggingface.co/BridgeTower/bridgetower-base-itm-mlm/blob/main/config.json"""
),
}
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : Dict = """bridgetower_vision_model"""
    def __init__( self , __magic_name__=768 , __magic_name__=12 , __magic_name__=3 , __magic_name__=16 , __magic_name__=288 , __magic_name__=1 , __magic_name__=1e-05 , __magic_name__=False , __magic_name__=True , __magic_name__=False , **__magic_name__ , ):
super().__init__(**__magic_name__ )
lowerCamelCase : Dict = hidden_size
lowerCamelCase : str = num_hidden_layers
lowerCamelCase : Optional[int] = num_channels
lowerCamelCase : List[str] = patch_size
lowerCamelCase : Tuple = image_size
lowerCamelCase : Any = initializer_factor
lowerCamelCase : Tuple = layer_norm_eps
lowerCamelCase : Tuple = stop_gradient
lowerCamelCase : Optional[int] = share_layernorm
lowerCamelCase : str = remove_last_layer
@classmethod
def UpperCamelCase__ ( cls , __magic_name__ , **__magic_name__ ):
lowerCamelCase , lowerCamelCase : int = cls.get_config_dict(__magic_name__ , **__magic_name__ )
if config_dict.get("""model_type""" ) == "bridgetower":
            lowerCamelCase : str = config_dict["""vision_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(__magic_name__ , **__magic_name__ )
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : Union[str, Any] = """bridgetower_text_model"""
    def __init__( self , __magic_name__=50_265 , __magic_name__=768 , __magic_name__=12 , __magic_name__=12 , __magic_name__=1 , __magic_name__=3_072 , __magic_name__="gelu" , __magic_name__=0.1 , __magic_name__=0.1 , __magic_name__=514 , __magic_name__=1 , __magic_name__=1e-05 , __magic_name__=1 , __magic_name__=0 , __magic_name__=2 , __magic_name__="absolute" , __magic_name__=True , **__magic_name__ , ):
super().__init__(**__magic_name__ )
lowerCamelCase : int = vocab_size
lowerCamelCase : int = hidden_size
lowerCamelCase : Any = num_hidden_layers
lowerCamelCase : Union[str, Any] = num_attention_heads
lowerCamelCase : Tuple = hidden_act
lowerCamelCase : Optional[int] = initializer_factor
lowerCamelCase : Any = intermediate_size
lowerCamelCase : List[str] = hidden_dropout_prob
lowerCamelCase : Dict = attention_probs_dropout_prob
lowerCamelCase : str = max_position_embeddings
lowerCamelCase : Union[str, Any] = type_vocab_size
lowerCamelCase : Optional[int] = layer_norm_eps
lowerCamelCase : Optional[int] = position_embedding_type
lowerCamelCase : List[str] = use_cache
lowerCamelCase : List[str] = pad_token_id
lowerCamelCase : List[str] = bos_token_id
lowerCamelCase : Optional[int] = eos_token_id
@classmethod
def UpperCamelCase__ ( cls , __magic_name__ , **__magic_name__ ):
lowerCamelCase , lowerCamelCase : int = cls.get_config_dict(__magic_name__ , **__magic_name__ )
if config_dict.get("""model_type""" ) == "bridgetower":
lowerCamelCase : Optional[int] = config_dict["""text_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(__magic_name__ , **__magic_name__ )
class BridgeTowerConfig(PretrainedConfig):
    model_type = "bridgetower"

    def __init__(
        self,
        share_cross_modal_transformer_layers=True,
        hidden_act="gelu",
        hidden_size=768,
        initializer_factor=1,
        layer_norm_eps=1e-05,
        share_link_tower_layers=False,
        link_tower_type="add",
        num_attention_heads=12,
        num_hidden_layers=6,
        tie_word_embeddings=False,
        init_layernorm_from_vision_encoder=False,
        text_config=None,
        vision_config=None,
        **kwargs,
    ):
        # TODO: remove this once the Hub files are updated.
        _ = kwargs.pop("text_config_dict", None)
        _ = kwargs.pop("vision_config_dict", None)

        super().__init__(**kwargs)
        self.share_cross_modal_transformer_layers = share_cross_modal_transformer_layers
        self.hidden_act = hidden_act
        self.hidden_size = hidden_size
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.share_link_tower_layers = share_link_tower_layers
        self.link_tower_type = link_tower_type
        self.num_attention_heads = num_attention_heads
        self.num_hidden_layers = num_hidden_layers
        self.tie_word_embeddings = tie_word_embeddings
        self.init_layernorm_from_vision_encoder = init_layernorm_from_vision_encoder

        if text_config is None:
            text_config = {}
            logger.info("`text_config` is `None`. Initializing the `BridgeTowerTextConfig` with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("`vision_config` is `None`. Initializing the `BridgeTowerVisionConfig` with default values.")

        self.text_config = BridgeTowerTextConfig(**text_config)
        self.vision_config = BridgeTowerVisionConfig(**vision_config)

    @classmethod
    def from_text_vision_configs(cls, text_config, vision_config, **kwargs):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
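

# Added usage sketch: compose a full BridgeTower config from its two sub-configs.
# Assumes the classes above behave like standard `PretrainedConfig` subclasses.
if __name__ == "__main__":
    text_config = BridgeTowerTextConfig(vocab_size=50265)
    vision_config = BridgeTowerVisionConfig(image_size=288)
    config = BridgeTowerConfig.from_text_vision_configs(text_config, vision_config)
    print(config.to_dict()["model_type"])  # bridgetower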
| 287 | 0 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import importlib.metadata
import json
import os
from dataclasses import dataclass
from typing import Any, Dict, Union
from packaging import version
from ..utils import is_torch_available, logging
if is_torch_available():
    import torch


logger = logging.get_logger(__name__)
@dataclass
class BitsAndBytesConfig:
    def __init__(
        self,
        load_in_8bit=False,
        load_in_4bit=False,
        llm_int8_threshold=6.0,
        llm_int8_skip_modules=None,
        llm_int8_enable_fp32_cpu_offload=False,
        llm_int8_has_fp16_weight=False,
        bnb_4bit_compute_dtype=None,
        bnb_4bit_quant_type="fp4",
        bnb_4bit_use_double_quant=False,
        **kwargs,
    ):
        self.load_in_8bit = load_in_8bit
        self.load_in_4bit = load_in_4bit
        self.llm_int8_threshold = llm_int8_threshold
        self.llm_int8_skip_modules = llm_int8_skip_modules
        self.llm_int8_enable_fp32_cpu_offload = llm_int8_enable_fp32_cpu_offload
        self.llm_int8_has_fp16_weight = llm_int8_has_fp16_weight
        self.bnb_4bit_quant_type = bnb_4bit_quant_type
        self.bnb_4bit_use_double_quant = bnb_4bit_use_double_quant

        if bnb_4bit_compute_dtype is None:
            self.bnb_4bit_compute_dtype = torch.float32
        elif isinstance(bnb_4bit_compute_dtype, str):
            self.bnb_4bit_compute_dtype = getattr(torch, bnb_4bit_compute_dtype)
        elif isinstance(bnb_4bit_compute_dtype, torch.dtype):
            self.bnb_4bit_compute_dtype = bnb_4bit_compute_dtype
        else:
            raise ValueError("bnb_4bit_compute_dtype must be a string or a torch.dtype")

        self.post_init()

    def post_init(self):
        if not isinstance(self.llm_int8_threshold, float):
            raise ValueError("llm_int8_threshold must be a float")

        if self.llm_int8_skip_modules is not None and not isinstance(self.llm_int8_skip_modules, list):
            raise ValueError("llm_int8_skip_modules must be a list of strings")

        if not isinstance(self.llm_int8_enable_fp32_cpu_offload, bool):
            raise ValueError("llm_int8_enable_fp32_cpu_offload must be a boolean")

        if not isinstance(self.llm_int8_has_fp16_weight, bool):
            raise ValueError("llm_int8_has_fp16_weight must be a boolean")

        if self.bnb_4bit_compute_dtype is not None and not isinstance(self.bnb_4bit_compute_dtype, torch.dtype):
            raise ValueError("bnb_4bit_compute_dtype must be torch.dtype")

        if not isinstance(self.bnb_4bit_quant_type, str):
            raise ValueError("bnb_4bit_quant_type must be a string")

        if not isinstance(self.bnb_4bit_use_double_quant, bool):
            raise ValueError("bnb_4bit_use_double_quant must be a boolean")

        if self.load_in_4bit and not version.parse(importlib.metadata.version("bitsandbytes")) >= version.parse(
            "0.39.0"
        ):
            raise ValueError(
                "4 bit quantization requires bitsandbytes>=0.39.0 - please upgrade your bitsandbytes version"
            )

    def is_quantizable(self):
        return self.load_in_8bit or self.load_in_4bit

    def quantization_method(self):
        if self.load_in_8bit:
            return "llm_int8"
        elif self.load_in_4bit and self.bnb_4bit_quant_type == "fp4":
            return "fp4"
        elif self.load_in_4bit and self.bnb_4bit_quant_type == "nf4":
            return "nf4"
        else:
            return None

    @classmethod
    def from_dict(cls, config_dict, return_unused_kwargs, **kwargs):
        config = cls(**config_dict)

        to_remove = []
        for key, value in kwargs.items():
            if hasattr(config, key):
                setattr(config, key, value)
                to_remove.append(key)
        for key in to_remove:
            kwargs.pop(key, None)

        if return_unused_kwargs:
            return config, kwargs
        else:
            return config

    def to_json_file(self, json_file_path):
        with open(json_file_path, "w", encoding="utf-8") as writer:
            config_dict = self.to_dict()
            json_string = json.dumps(config_dict, indent=2, sort_keys=True) + "\n"
            writer.write(json_string)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["bnb_4bit_compute_dtype"] = str(output["bnb_4bit_compute_dtype"]).split(".")[1]
        return output

    def __repr__(self):
        return f"{self.__class__.__name__} {self.to_json_string()}"

    def to_json_string(self, use_diff=True):
        if use_diff is True:
            config_dict = self.to_diff_dict()
        else:
            config_dict = self.to_dict()
        return json.dumps(config_dict, indent=2, sort_keys=True) + "\n"

    def to_diff_dict(self):
        config_dict = self.to_dict()

        # get the default config dict
        default_config_dict = BitsAndBytesConfig().to_dict()

        serializable_config_dict = {}

        # only serialize values that differ from the default config
        for key, value in config_dict.items():
            if value != default_config_dict[key]:
                serializable_config_dict[key] = value

        return serializable_config_dict
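

# Added round-trip demo (requires torch, and bitsandbytes>=0.39.0 once
# `load_in_4bit=True`, since `post_init` checks the installed version).
if __name__ == "__main__":
    quantization_config = BitsAndBytesConfig(load_in_4bit=True, bnb_4bit_quant_type="nf4")
    print(quantization_config.quantization_method())  # nf4
    print(quantization_config.to_diff_dict())  # only the non-default fields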
| 70 |
def solution(limit=1_000_000):
    # Sieve of Eratosthenes restricted to the odd numbers, plus 2.
    primes = set(range(3, limit, 2))
    primes.add(2)
    for p in range(3, limit, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, limit, p)))

    # Euler's totient via the product formula: phi(n) = n * prod(1 - 1/p).
    phi = [float(n) for n in range(limit + 1)]

    for p in primes:
        for n in range(p, limit + 1, p):
            phi[n] *= 1 - 1 / p

    return int(sum(phi[2:]))


if __name__ == "__main__":
    print(f"{solution() = }")
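    # Added worked check: for denominators d <= 8 there are 21 reduced proper
    # fractions, i.e. sum(phi(d) for d in 2..8) = 1+2+2+4+2+6+4 = 21.
    assert solution(8) == 21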
| 287 | 0 |
import time
from dataclasses import dataclass
from multiprocessing import Pool
from unittest import TestCase
from unittest.mock import patch
import multiprocess
import numpy as np
import pytest
from datasets.utils.py_utils import (
NestedDataStructure,
asdict,
iflatmap_unordered,
map_nested,
temp_seed,
temporary_assignment,
zip_dict,
)
from .utils import require_tf, require_torch
def np_sum(x):  # picklable for multiprocessing
    return x.sum()


def add_one(i):  # picklable for multiprocessing
    return i + 1


@dataclass
class A:
    x: int
    y: str


class PyUtilsTest(TestCase):
"""simple docstring"""
def __lowercase ( self ):
"""simple docstring"""
__UpperCamelCase : str ={}
__UpperCamelCase : Union[str, Any] =[]
__UpperCamelCase : List[str] =1
__UpperCamelCase : List[Any] =[1, 2]
__UpperCamelCase : Dict ={'a': 1, 'b': 2}
__UpperCamelCase : Union[str, Any] ={'a': [1, 2], 'b': [3, 4]}
__UpperCamelCase : Tuple ={'a': {'1': 1}, 'b': 2}
__UpperCamelCase : Union[str, Any] ={'a': 1, 'b': 2, 'c': 3, 'd': 4}
__UpperCamelCase : Dict ={}
__UpperCamelCase : Optional[Any] =[]
__UpperCamelCase : str =2
__UpperCamelCase : str =[2, 3]
__UpperCamelCase : Any ={'a': 2, 'b': 3}
__UpperCamelCase : Any ={'a': [2, 3], 'b': [4, 5]}
__UpperCamelCase : List[str] ={'a': {'1': 2}, 'b': 3}
__UpperCamelCase : str ={'a': 2, 'b': 3, 'c': 4, 'd': 5}
self.assertEqual(map_nested(lowerCamelCase__ , lowerCamelCase__ ) , lowerCamelCase__ )
self.assertEqual(map_nested(lowerCamelCase__ , lowerCamelCase__ ) , lowerCamelCase__ )
self.assertEqual(map_nested(lowerCamelCase__ , lowerCamelCase__ ) , lowerCamelCase__ )
self.assertEqual(map_nested(lowerCamelCase__ , lowerCamelCase__ ) , lowerCamelCase__ )
self.assertEqual(map_nested(lowerCamelCase__ , lowerCamelCase__ ) , lowerCamelCase__ )
self.assertEqual(map_nested(lowerCamelCase__ , lowerCamelCase__ ) , lowerCamelCase__ )
self.assertEqual(map_nested(lowerCamelCase__ , lowerCamelCase__ ) , lowerCamelCase__ )
self.assertEqual(map_nested(lowerCamelCase__ , lowerCamelCase__ ) , lowerCamelCase__ )
__UpperCamelCase : Tuple =2
self.assertEqual(map_nested(lowerCamelCase__ , lowerCamelCase__ , num_proc=lowerCamelCase__ ) , lowerCamelCase__ )
self.assertEqual(map_nested(lowerCamelCase__ , lowerCamelCase__ , num_proc=lowerCamelCase__ ) , lowerCamelCase__ )
self.assertEqual(map_nested(lowerCamelCase__ , lowerCamelCase__ , num_proc=lowerCamelCase__ ) , lowerCamelCase__ )
self.assertEqual(map_nested(lowerCamelCase__ , lowerCamelCase__ , num_proc=lowerCamelCase__ ) , lowerCamelCase__ )
self.assertEqual(map_nested(lowerCamelCase__ , lowerCamelCase__ , num_proc=lowerCamelCase__ ) , lowerCamelCase__ )
self.assertEqual(map_nested(lowerCamelCase__ , lowerCamelCase__ , num_proc=lowerCamelCase__ ) , lowerCamelCase__ )
self.assertEqual(map_nested(lowerCamelCase__ , lowerCamelCase__ , num_proc=lowerCamelCase__ ) , lowerCamelCase__ )
self.assertEqual(map_nested(lowerCamelCase__ , lowerCamelCase__ , num_proc=lowerCamelCase__ ) , lowerCamelCase__ )
__UpperCamelCase : Tuple ={'a': np.eye(2 ), 'b': np.zeros(3 ), 'c': np.ones(2 )}
__UpperCamelCase : List[Any] ={'a': 2, 'b': 0, 'c': 2}
__UpperCamelCase : Tuple ={
'a': np.eye(2 ).astype(lowerCamelCase__ ),
'b': np.zeros(3 ).astype(lowerCamelCase__ ),
'c': np.ones(2 ).astype(lowerCamelCase__ ),
}
self.assertEqual(map_nested(lowerCamelCase__ , lowerCamelCase__ , map_numpy=lowerCamelCase__ ) , lowerCamelCase__ )
self.assertEqual(
{k: v.tolist() for k, v in map_nested(lowerCamelCase__ , lowerCamelCase__ , map_numpy=lowerCamelCase__ ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
self.assertEqual(map_nested(lowerCamelCase__ , lowerCamelCase__ , map_numpy=lowerCamelCase__ , num_proc=lowerCamelCase__ ) , lowerCamelCase__ )
self.assertEqual(
{k: v.tolist() for k, v in map_nested(lowerCamelCase__ , lowerCamelCase__ , map_numpy=lowerCamelCase__ , num_proc=lowerCamelCase__ ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
with self.assertRaises(lowerCamelCase__ ): # can't pickle a local lambda
map_nested(lambda lowerCamelCase__ : x + 1 , lowerCamelCase__ , num_proc=lowerCamelCase__ )
    def test_zip_dict(self):
        d1 = {"a": 1, "b": 2}
        d2 = {"a": 3, "b": 4}
        d3 = {"a": 5, "b": 6}
        expected_zip_dict_result = sorted([("a", (1, 3, 5)), ("b", (2, 4, 6))])
        self.assertEqual(sorted(zip_dict(d1, d2, d3)), expected_zip_dict_result)

    def test_temporary_assignment(self):
        class Foo:
            my_attr = "bar"

        foo = Foo()
        self.assertEqual(foo.my_attr, "bar")
        with temporary_assignment(foo, "my_attr", "BAR"):
            self.assertEqual(foo.my_attr, "BAR")
        self.assertEqual(foo.my_attr, "bar")
@pytest.mark.parametrize(
'iterable_length, num_proc, expected_num_proc' ,[
(1, None, 1),
(1, 1, 1),
(2, None, 1),
(2, 1, 1),
(2, 2, 1),
(2, 3, 1),
(3, 2, 1),
(16, 16, 16),
(16, 17, 16),
(17, 16, 16),
] ,)
def test_map_nested_num_proc(iterable_length, num_proc, expected_num_proc):
    with patch("datasets.utils.py_utils._single_map_nested") as mock_single_map_nested, patch(
        "datasets.parallel.parallel.Pool"
    ) as mock_multiprocessing_pool:
        data_struct = {f"{i}": i for i in range(iterable_length)}
        _ = map_nested(lambda x: x + 10, data_struct, num_proc=num_proc, parallel_min_length=16)
        if expected_num_proc == 1:
            assert mock_single_map_nested.called
            assert not mock_multiprocessing_pool.called
        else:
            assert not mock_single_map_nested.called
            assert mock_multiprocessing_pool.called
            assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc
class TempSeedTest(TestCase):
    @require_tf
    def test_temp_seed_tensorflow(self):
        import tensorflow as tf
        from tensorflow.keras import layers

        model = layers.Dense(2)

        def gen_random_output():
            x = tf.random.uniform((1, 3))
            return model(x).numpy()

        with temp_seed(42, set_tensorflow=True):
            out1 = gen_random_output()
        with temp_seed(42, set_tensorflow=True):
            out2 = gen_random_output()
        out3 = gen_random_output()

        np.testing.assert_equal(out1, out2)
        self.assertGreater(np.abs(out1 - out3).sum(), 0)

    @require_torch
    def test_temp_seed_torch(self):
        import torch

        def gen_random_output():
            model = torch.nn.Linear(3, 2)
            x = torch.rand(1, 3)
            return model(x).detach().numpy()

        with temp_seed(42, set_pytorch=True):
            out1 = gen_random_output()
        with temp_seed(42, set_pytorch=True):
            out2 = gen_random_output()
        out3 = gen_random_output()

        np.testing.assert_equal(out1, out2)
        self.assertGreater(np.abs(out1 - out3).sum(), 0)

    def test_temp_seed_numpy(self):
        def gen_random_output():
            return np.random.rand(1, 3)

        with temp_seed(42):
            out1 = gen_random_output()
        with temp_seed(42):
            out2 = gen_random_output()
        out3 = gen_random_output()

        np.testing.assert_equal(out1, out2)
        self.assertGreater(np.abs(out1 - out3).sum(), 0)
@pytest.mark.parametrize("input_data", [{}])
def test_nested_data_structure_data(input_data):
    output_data = NestedDataStructure(input_data).data
    assert output_data == input_data


@pytest.mark.parametrize(
    "data, expected_output",
    [
        ({}, []),
        ([], []),
        ("foo", ["foo"]),
        (["foo", "bar"], ["foo", "bar"]),
        ([["foo", "bar"]], ["foo", "bar"]),
        ([[["foo"], ["bar"]]], ["foo", "bar"]),
        ([[["foo"], "bar"]], ["foo", "bar"]),
        ({"a": 1, "b": 2}, [1, 2]),
        ({"a": [1, 2], "b": [3, 4]}, [1, 2, 3, 4]),
        ({"a": [[1, 2]], "b": [[3, 4]]}, [1, 2, 3, 4]),
        ({"a": [[1, 2]], "b": [3, 4]}, [1, 2, 3, 4]),
        ({"a": [[[1], [2]]], "b": [[[3], [4]]]}, [1, 2, 3, 4]),
        ({"a": [[[1], [2]]], "b": [[3, 4]]}, [1, 2, 3, 4]),
        ({"a": [[[1], [2]]], "b": [3, 4]}, [1, 2, 3, 4]),
        ({"a": [[[1], [2]]], "b": [3, [4]]}, [1, 2, 3, 4]),
        ({"a": {"1": 1}, "b": 2}, [1, 2]),
        ({"a": {"1": [1]}, "b": 2}, [1, 2]),
        ({"a": {"1": [1]}, "b": [2]}, [1, 2]),
    ],
)
def test_flatten(data, expected_output):
    output = NestedDataStructure(data).flatten()
    assert output == expected_output


def test_asdict():
    input = A(x=1, y="foobar")
    expected_output = {"x": 1, "y": "foobar"}
    assert asdict(input) == expected_output

    input = {"a": {"b": A(x=10, y="foo")}, "c": [A(x=20, y="bar")]}
    expected_output = {"a": {"b": {"x": 10, "y": "foo"}}, "c": [{"x": 20, "y": "bar"}]}
    assert asdict(input) == expected_output

    with pytest.raises(TypeError):
        asdict([1, A(x=10, y="foo")])


def _split_text(text: str):
    return text.split()


def _2seconds_generator_of_2items_with_timing(content):
    yield (time.time(), content)
    time.sleep(2)
    yield (time.time(), content)


def test_iflatmap_unordered():
    with Pool(2) as pool:
        out = list(iflatmap_unordered(pool, _split_text, kwargs_iterable=[{"text": "hello there"}] * 10))
        assert out.count("hello") == 10
        assert out.count("there") == 10
        assert len(out) == 20

    # check multiprocess from pathos (uses dill for pickling)
    with multiprocess.Pool(2) as pool:
        out = list(iflatmap_unordered(pool, _split_text, kwargs_iterable=[{"text": "hello there"}] * 10))
        assert out.count("hello") == 10
        assert out.count("there") == 10
        assert len(out) == 20

    # check that we get items as fast as possible
    with Pool(2) as pool:
        out = []
        for yield_time, content in iflatmap_unordered(
            pool, _2seconds_generator_of_2items_with_timing, kwargs_iterable=[{"content": "a"}, {"content": "b"}]
        ):
            assert yield_time < time.time() + 0.1, "we should get each item directly after it was yielded"
            out.append(content)
        assert out.count("a") == 2
        assert out.count("b") == 2
        assert len(out) == 4
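

if __name__ == "__main__":
    # Added demo (not part of the original suite): map_nested applies a
    # function through arbitrarily nested dicts and lists.
    print(map_nested(lambda x: x * 2, {"a": [1, 2], "b": 3}))  # {'a': [2, 4], 'b': 6}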
| 71 |
from __future__ import annotations
import collections
import pprint
from pathlib import Path
def signature(word: str) -> str:
    """Return a word's sorted-letter signature."""
    return "".join(sorted(word))


def anagram(my_word: str) -> list[str]:
    """Return every dictionary word that shares my_word's signature."""
    return word_by_signature[signature(my_word)]


data = Path(__file__).parent.joinpath("words.txt").read_text(encoding="utf-8")
word_list = sorted({word.strip().lower() for word in data.splitlines()})

word_by_signature = collections.defaultdict(list)
for word in word_list:
    word_by_signature[signature(word)].append(word)

if __name__ == "__main__":
    all_anagrams = {word: anagram(word) for word in word_list if len(anagram(word)) > 1}

    with open("anagrams.txt", "w") as file:
        file.write("all_anagrams = \n ")
        file.write(pprint.pformat(all_anagrams))
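    # Added sanity check: a signature is simply the word's letters in sorted order.
    assert signature("dog") == "dgo"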
| 287 | 0 |
"""simple docstring"""
import numpy as np
from transformers import Pipeline
def softmax(outputs):
    maxes = np.max(outputs, axis=-1, keepdims=True)
    shifted_exp = np.exp(outputs - maxes)
    return shifted_exp / shifted_exp.sum(axis=-1, keepdims=True)


class PairClassificationPipeline(Pipeline):
    def _sanitize_parameters(self, **kwargs):
        preprocess_kwargs = {}
        if "second_text" in kwargs:
            preprocess_kwargs["second_text"] = kwargs["second_text"]
        return preprocess_kwargs, {}, {}

    def preprocess(self, text, second_text=None):
        return self.tokenizer(text, text_pair=second_text, return_tensors=self.framework)

    def _forward(self, model_inputs):
        return self.model(**model_inputs)

    def postprocess(self, model_outputs):
        logits = model_outputs.logits[0].numpy()
        probabilities = softmax(logits)

        best_class = np.argmax(probabilities)
        label = self.model.config.id2label[best_class]
        score = probabilities[best_class].item()
        logits = logits.tolist()
        return {"label": label, "score": score, "logits": logits}
| 72 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
    import PIL


logger = logging.get_logger(__name__)
class ConvNextImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        crop_pct: float = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 384}
        size = get_size_dict(size, default_to_square=False)

        self.do_resize = do_resize
        self.size = size
        # Default value set here for backwards compatibility where the value in config is None
        self.crop_pct = crop_pct if crop_pct is not None else 224 / 256
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        crop_pct: float,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"Size dictionary must contain 'shortest_edge' key. Got {size.keys()}")
        shortest_edge = size["shortest_edge"]

        if shortest_edge < 384:
            # maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
            resize_shortest_edge = int(shortest_edge / crop_pct)
            resize_size = get_resize_output_image_size(image, size=resize_shortest_edge, default_to_square=False)
            image = resize(image=image, size=resize_size, resample=resample, data_format=data_format, **kwargs)
            # then crop to (shortest_edge, shortest_edge)
            return center_crop(image=image, size=(shortest_edge, shortest_edge), data_format=data_format, **kwargs)
        else:
            # warping (no cropping) when evaluated at 384 or larger
            return resize(
                image, size=(shortest_edge, shortest_edge), resample=resample, data_format=data_format, **kwargs
            )

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        crop_pct: float = None,
        resample: PILImageResampling = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        crop_pct = crop_pct if crop_pct is not None else self.crop_pct
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_resize and size["shortest_edge"] < 384 and crop_pct is None:
            raise ValueError("crop_pct must be specified if size < 384.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, crop_pct=crop_pct, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
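

# Added usage sketch: preprocess one dummy image. With the default
# {"shortest_edge": 384} size, the output is a channel-first (3, 384, 384) array.
if __name__ == "__main__":
    image = (np.random.rand(256, 256, 3) * 255).astype("uint8")
    image_processor = ConvNextImageProcessor()
    batch = image_processor(images=image)
    print(batch["pixel_values"][0].shape)  # (3, 384, 384)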
| 287 | 0 |
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
pytestmark = pytest.mark.integration


@pytest.mark.parametrize("path", ["paws", "csv"])
def test_inspect_dataset(path, tmp_path):
    inspect_dataset(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)


@pytest.mark.filterwarnings("ignore:inspect_metric is deprecated:FutureWarning")
@pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning")
@pytest.mark.parametrize("path", ["accuracy"])
def test_inspect_metric(path, tmp_path):
    inspect_metric(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)


@pytest.mark.parametrize(
    "path, config_name, expected_splits",
    [
        ("squad", "plain_text", ["train", "validation"]),
        ("dalle-mini/wit", "dalle-mini--wit", ["train"]),
        ("paws", "labeled_final", ["train", "test", "validation"]),
    ],
)
def test_get_dataset_config_info(path, config_name, expected_splits):
    info = get_dataset_config_info(path, config_name=config_name)
    assert info.config_name == config_name
    assert list(info.splits.keys()) == expected_splits


@pytest.mark.parametrize(
    "path, config_name, expected_exception",
    [
        ("paws", None, ValueError),
    ],
)
def test_get_dataset_config_info_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_config_info(path, config_name=config_name)


@pytest.mark.parametrize(
    "path, expected",
    [
        ("squad", "plain_text"),
        ("acronym_identification", "default"),
        ("lhoestq/squad", "plain_text"),
        ("lhoestq/test", "default"),
        ("lhoestq/demo1", "lhoestq--demo1"),
        ("dalle-mini/wit", "dalle-mini--wit"),
    ],
)
def test_get_dataset_config_names(path, expected):
    config_names = get_dataset_config_names(path)
    assert expected in config_names


@pytest.mark.parametrize(
    "path, expected_configs, expected_splits_in_first_config",
    [
        ("squad", ["plain_text"], ["train", "validation"]),
        ("dalle-mini/wit", ["dalle-mini--wit"], ["train"]),
        ("paws", ["labeled_final", "labeled_swap", "unlabeled_final"], ["train", "test", "validation"]),
    ],
)
def test_get_dataset_info(path, expected_configs, expected_splits_in_first_config):
    infos = get_dataset_infos(path)
    assert list(infos.keys()) == expected_configs
    expected_config = expected_configs[0]
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits_in_first_config


@pytest.mark.parametrize(
    "path, expected_config, expected_splits",
    [
        ("squad", "plain_text", ["train", "validation"]),
        ("dalle-mini/wit", "dalle-mini--wit", ["train"]),
        ("paws", "labeled_final", ["train", "test", "validation"]),
    ],
)
def test_get_dataset_split_names(path, expected_config, expected_splits):
    infos = get_dataset_infos(path)
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits


@pytest.mark.parametrize(
    "path, config_name, expected_exception",
    [
        ("paws", None, ValueError),
    ],
)
def test_get_dataset_split_names_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_split_names(path, config_name=config_name)
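

# Added reference example (not a test) for the API exercised above; it needs
# network access to the Hub:
# >>> get_dataset_split_names("squad", config_name="plain_text")
# ['train', 'validation']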
| 73 |
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPT2Config
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
config_common_kwargs = {
"""return_dict""": False,
"""output_hidden_states""": True,
"""output_attentions""": True,
"""torchscript""": True,
"""torch_dtype""": """float16""",
"""use_bfloat16""": True,
"""tf_legacy_loss""": True,
"""pruned_heads""": {"""a""": 1},
"""tie_word_embeddings""": False,
"""is_decoder""": True,
"""cross_attention_hidden_size""": 1_2_8,
"""add_cross_attention""": True,
"""tie_encoder_decoder""": True,
"""max_length""": 5_0,
"""min_length""": 3,
"""do_sample""": True,
"""early_stopping""": True,
"""num_beams""": 3,
"""num_beam_groups""": 3,
"""diversity_penalty""": 0.5,
"""temperature""": 2.0,
"""top_k""": 1_0,
"""top_p""": 0.7,
"""typical_p""": 0.2,
"""repetition_penalty""": 0.8,
"""length_penalty""": 0.8,
"""no_repeat_ngram_size""": 5,
"""encoder_no_repeat_ngram_size""": 5,
"""bad_words_ids""": [1, 2, 3],
"""num_return_sequences""": 3,
"""chunk_size_feed_forward""": 5,
"""output_scores""": True,
"""return_dict_in_generate""": True,
"""forced_bos_token_id""": 2,
"""forced_eos_token_id""": 3,
"""remove_invalid_values""": True,
"""architectures""": ["""BertModel"""],
"""finetuning_task""": """translation""",
"""id2label""": {0: """label"""},
"""label2id""": {"""label""": """0"""},
"""tokenizer_class""": """BertTokenizerFast""",
"""prefix""": """prefix""",
"""bos_token_id""": 6,
"""pad_token_id""": 7,
"""eos_token_id""": 8,
"""sep_token_id""": 9,
"""decoder_start_token_id""": 1_0,
"""exponential_decay_length_penalty""": (5, 1.01),
"""suppress_tokens""": [0, 1],
"""begin_suppress_tokens""": 2,
"""task_specific_params""": {"""translation""": """some_params"""},
"""problem_type""": """regression""",
}
@is_staging_test
class A__ ( unittest.TestCase):
@classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-config")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-config-org")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-config")
        except HTTPError:
            pass
def UpperCamelCase__ ( self ):
lowerCamelCase : List[Any] = BertConfig(
vocab_size=9_9 , hidden_size=3_2 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=3_7 )
config.push_to_hub("""test-config""" , use_auth_token=self._token )
lowerCamelCase : Any = BertConfig.from_pretrained(F'''{USER}/test-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__magic_name__ , getattr(__magic_name__ , __magic_name__ ) )
# Reset repo
delete_repo(token=self._token , repo_id="""test-config""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(__magic_name__ , repo_id="""test-config""" , push_to_hub=__magic_name__ , use_auth_token=self._token )
lowerCamelCase : Optional[Any] = BertConfig.from_pretrained(F'''{USER}/test-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__magic_name__ , getattr(__magic_name__ , __magic_name__ ) )
def UpperCamelCase__ ( self ):
lowerCamelCase : Dict = BertConfig(
vocab_size=9_9 , hidden_size=3_2 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=3_7 )
config.push_to_hub("""valid_org/test-config-org""" , use_auth_token=self._token )
lowerCamelCase : Optional[int] = BertConfig.from_pretrained("""valid_org/test-config-org""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__magic_name__ , getattr(__magic_name__ , __magic_name__ ) )
# Reset repo
delete_repo(token=self._token , repo_id="""valid_org/test-config-org""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
__magic_name__ , repo_id="""valid_org/test-config-org""" , push_to_hub=__magic_name__ , use_auth_token=self._token )
lowerCamelCase : List[str] = BertConfig.from_pretrained("""valid_org/test-config-org""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__magic_name__ , getattr(__magic_name__ , __magic_name__ ) )
def UpperCamelCase__ ( self ):
CustomConfig.register_for_auto_class()
lowerCamelCase : Optional[Any] = CustomConfig(attribute=4_2 )
config.push_to_hub("""test-dynamic-config""" , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(config.auto_map , {"""AutoConfig""": """custom_configuration.CustomConfig"""} )
lowerCamelCase : List[str] = AutoConfig.from_pretrained(F'''{USER}/test-dynamic-config''' , trust_remote_code=__magic_name__ )
# Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
self.assertEqual(new_config.__class__.__name__ , """CustomConfig""" )
self.assertEqual(new_config.attribute , 4_2 )
class A__ ( unittest.TestCase):
    def test_config_update_from_string(self):
        c = GPT2Config()

        # attempt to modify each of int/float/bool/str config records and verify they were updated
        n_embd = c.n_embd + 1  # int
        resid_pdrop = c.resid_pdrop + 1.0  # float
        scale_attn_weights = not c.scale_attn_weights  # bool
        summary_type = c.summary_type + "foo"  # str
        c.update_from_string(
            f"n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}"
        )
        self.assertEqual(n_embd, c.n_embd, "mismatch for key: n_embd")
        self.assertEqual(resid_pdrop, c.resid_pdrop, "mismatch for key: resid_pdrop")
        self.assertEqual(scale_attn_weights, c.scale_attn_weights, "mismatch for key: scale_attn_weights")
        self.assertEqual(summary_type, c.summary_type, "mismatch for key: summary_type")
def UpperCamelCase__ ( self ):
lowerCamelCase : str = PretrainedConfig()
lowerCamelCase : int = [key for key in base_config.__dict__ if key not in config_common_kwargs]
# If this part of the test fails, you have arguments to addin config_common_kwargs above.
self.assertListEqual(
__magic_name__ , ["""is_encoder_decoder""", """_name_or_path""", """_commit_hash""", """transformers_version"""] )
lowerCamelCase : List[str] = [key for key, value in config_common_kwargs.items() if value == getattr(__magic_name__ , __magic_name__ )]
if len(__magic_name__ ) > 0:
raise ValueError(
"""The following keys are set with the default values in"""
""" `test_configuration_common.config_common_kwargs` pick another value for them:"""
F''' {", ".join(__magic_name__ )}.''' )
def UpperCamelCase__ ( self ):
with self.assertRaises(__magic_name__ ):
# config is in subfolder, the following should not work without specifying the subfolder
lowerCamelCase : Dict = BertConfig.from_pretrained("""hf-internal-testing/tiny-random-bert-subfolder""" )
lowerCamelCase : str = BertConfig.from_pretrained("""hf-internal-testing/tiny-random-bert-subfolder""" , subfolder="""bert""" )
self.assertIsNotNone(__magic_name__ )
def UpperCamelCase__ ( self ):
# A mock response for an HTTP head request to emulate server down
lowerCamelCase : Dict = mock.Mock()
lowerCamelCase : Optional[int] = 5_0_0
lowerCamelCase : List[Any] = {}
lowerCamelCase : Tuple = HTTPError
lowerCamelCase : Union[str, Any] = {}
# Download this model to make sure it's in the cache.
lowerCamelCase : List[str] = BertConfig.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch("""requests.Session.request""" , return_value=__magic_name__ ) as mock_head:
lowerCamelCase : Any = BertConfig.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
# This check we did call the fake head request
mock_head.assert_called()
def UpperCamelCase__ ( self ):
# This test is for deprecated behavior and can be removed in v5
lowerCamelCase : List[str] = BertConfig.from_pretrained(
"""https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json""" )
def UpperCamelCase__ ( self ):
lowerCamelCase : Optional[int] = AutoConfig.from_pretrained("""bert-base-cased""" )
lowerCamelCase : Optional[Any] = ["""config.4.0.0.json"""]
with tempfile.TemporaryDirectory() as tmp_dir:
configuration.save_pretrained(__magic_name__ )
lowerCamelCase : str = 2
json.dump(configuration.to_dict() , open(os.path.join(__magic_name__ , """config.4.0.0.json""" ) , """w""" ) )
# This should pick the new configuration file as the version of Transformers is > 4.0.0
lowerCamelCase : Union[str, Any] = AutoConfig.from_pretrained(__magic_name__ )
self.assertEqual(new_configuration.hidden_size , 2 )
# Will need to be adjusted if we reach v42 and this test is still here.
# Should pick the old configuration file as the version of Transformers is < 4.42.0
lowerCamelCase : Any = ["""config.42.0.0.json"""]
lowerCamelCase : Optional[Any] = 7_6_8
configuration.save_pretrained(__magic_name__ )
shutil.move(os.path.join(__magic_name__ , """config.4.0.0.json""" ) , os.path.join(__magic_name__ , """config.42.0.0.json""" ) )
lowerCamelCase : int = AutoConfig.from_pretrained(__magic_name__ )
self.assertEqual(new_configuration.hidden_size , 7_6_8 )
def UpperCamelCase__ ( self ):
# This repo has two configuration files, one for v4.0.0 and above with a different hidden size.
lowerCamelCase : str = """hf-internal-testing/test-two-configs"""
import transformers as new_transformers
lowerCamelCase : Tuple = """v4.0.0"""
lowerCamelCase , lowerCamelCase : Optional[int] = new_transformers.models.auto.AutoConfig.from_pretrained(
__magic_name__ , return_unused_kwargs=__magic_name__ )
self.assertEqual(new_configuration.hidden_size , 2 )
# This checks `_configuration_file` ia not kept in the kwargs by mistake.
self.assertDictEqual(__magic_name__ , {} )
# Testing an older version by monkey-patching the version in the module it's used.
import transformers as old_transformers
lowerCamelCase : Tuple = """v3.0.0"""
lowerCamelCase : Any = old_transformers.models.auto.AutoConfig.from_pretrained(__magic_name__ )
self.assertEqual(old_configuration.hidden_size , 7_6_8 )
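

# Added illustration of the `update_from_string` API exercised above: values are
# parsed from "key=value" pairs and coerced to each attribute's existing type.
if __name__ == "__main__":
    c = GPT2Config()
    c.update_from_string("n_embd=10,resid_pdrop=0.2,scale_attn_weights=false")
    print(c.n_embd, c.resid_pdrop, c.scale_attn_weights)  # 10 0.2 False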
| 287 | 0 |
"""simple docstring"""
import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "mgp-str": "https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"mgp-str": 27}


class MgpstrTokenizer(PreTrainedTokenizer):
    """Character-level tokenizer for MGP-STR."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(self, vocab_file, unk_token="[GO]", bos_token="[GO]", eos_token="[s]", pad_token="[GO]", **kwargs):
        super().__init__(
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.vocab = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.vocab.items()}

    @property
    def vocab_size(self):
        return len(self.vocab)

    def get_vocab(self):
        return dict(self.vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        char_tokens = []
        for s in text:
            char_tokens.extend(s)
        return char_tokens

    def _convert_token_to_id(self, token):
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.vocab, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        return (vocab_file,)
| 74 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
    import numpy as np
    import tensorflow as tf

    from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class TFCamembertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_output_embeds_base_model(self):
        model = TFCamembertModel.from_pretrained("jplu/tf-camembert-base")

        input_ids = tf.convert_to_tensor(
            [[5, 121, 11, 660, 16, 730, 25543, 110, 83, 6]],
            dtype=tf.int32,
        )  # J'aime le camembert !"

        output = model(input_ids)["last_hidden_state"]
        expected_shape = tf.TensorShape((1, 10, 768))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]],
            dtype=tf.float32,
        )
        # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
        # camembert.eval()
        # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()

        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
| 287 | 0 |
'''simple docstring'''
from collections.abc import Callable
class Heap:
    """A generic heap; pass a key function to use it as a min- or max-heap."""

    def __init__(self, key=None):
        # Stores the actual heap items as [item, score] pairs.
        self.arr = []
        # Stores indexes of each item for supporting updates and deletion.
        self.pos_map = {}
        # Stores current size of heap.
        self.size = 0
        # Stores function used to evaluate the score of an item on which basis
        # ordering will be done.
        self.key = key or (lambda x: x)

    def _parent(self, i):
        """Returns parent index of given index if it exists, else None."""
        return int((i - 1) / 2) if i > 0 else None

    def _left(self, i):
        """Returns left-child index of given index if it exists, else None."""
        left = int(2 * i + 1)
        return left if 0 < left < self.size else None

    def _right(self, i):
        """Returns right-child index of given index if it exists, else None."""
        right = int(2 * i + 2)
        return right if 0 < right < self.size else None

    def _swap(self, i, j):
        """Performs changes required for swapping two elements in the heap."""
        # First update the indexes of the items in the index map.
        self.pos_map[self.arr[i][0]], self.pos_map[self.arr[j][0]] = (
            self.pos_map[self.arr[j][0]],
            self.pos_map[self.arr[i][0]],
        )
        # Then swap the items in the list.
        self.arr[i], self.arr[j] = self.arr[j], self.arr[i]

    def _cmp(self, i, j):
        """Compares the scores of the two items."""
        return self.arr[i][1] < self.arr[j][1]

    def _get_valid_parent(self, i):
        """Returns the index of a valid parent among i and its children."""
        left = self._left(i)
        right = self._right(i)
        valid_parent = i

        if left is not None and not self._cmp(left, valid_parent):
            valid_parent = left
        if right is not None and not self._cmp(right, valid_parent):
            valid_parent = right

        return valid_parent

    def _heapify_up(self, index):
        """Fixes the heap in the upward direction of the given index."""
        parent = self._parent(index)
        while parent is not None and not self._cmp(index, parent):
            self._swap(index, parent)
            index, parent = parent, self._parent(parent)

    def _heapify_down(self, index):
        """Fixes the heap in the downward direction of the given index."""
        valid_parent = self._get_valid_parent(index)
        while valid_parent != index:
            self._swap(index, valid_parent)
            index, valid_parent = valid_parent, self._get_valid_parent(valid_parent)

    def update_item(self, item, item_value):
        """Updates the given item's value in the heap if present."""
        if item not in self.pos_map:
            return
        index = self.pos_map[item]
        self.arr[index] = [item, self.key(item_value)]
        # Make sure heap is right in both up and down direction.
        # Ideally only one of them will make any change.
        self._heapify_up(index)
        self._heapify_down(index)

    def delete_item(self, item):
        """Deletes the given item from the heap if present."""
        if item not in self.pos_map:
            return
        index = self.pos_map[item]
        del self.pos_map[item]
        # Move the last item into the freed slot and re-point its index.
        self.arr[index] = self.arr[self.size - 1]
        self.pos_map[self.arr[self.size - 1][0]] = index
        self.size -= 1
        # Make sure heap is right in both up and down direction. Ideally only one
        # of them will make any change- so no performance loss in calling both.
        if self.size > index:
            self._heapify_up(index)
            self._heapify_down(index)

    def insert_item(self, item, item_value):
        """Inserts the given item with the given value into the heap."""
        arr_len = len(self.arr)
        if arr_len == self.size:
            self.arr.append([item, self.key(item_value)])
        else:
            self.arr[self.size] = [item, self.key(item_value)]
        self.pos_map[item] = self.size
        self.size += 1
        self._heapify_up(self.size - 1)

    def get_top(self):
        """Returns the top item of the heap if present."""
        return self.arr[0] if self.size else None

    def extract_top(self):
        """Returns and removes the top item of the heap if present."""
        top_item_tuple = self.get_top()
        if top_item_tuple:
            self.delete_item(top_item_tuple[0])
        return top_item_tuple
def a_() -> None:
    """simple docstring"""


if __name__ == "__main__":
    import doctest

    doctest.testmod()
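    # Added demo (not from the original module): _cmp bubbles larger scores
    # upward, so with the identity key the heap behaves as a max-heap.
    heap = Heap()
    heap.insert_item("a", 34)
    heap.insert_item("b", 31)
    heap.insert_item("c", 37)
    assert heap.get_top() == ["c", 37]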
| 75 |
import bz2
import gzip
import lzma
import os
import shutil
import struct
import tarfile
import warnings
import zipfile
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict, List, Optional, Type, Union
from .. import config
from .filelock import FileLock
from .logging import get_logger
logger = get_logger(__name__)
class ExtractManager:
    def __init__(self, cache_dir=None):
        self.extract_dir = (
            os.path.join(cache_dir, config.EXTRACTED_DATASETS_DIR) if cache_dir else config.EXTRACTED_DATASETS_PATH
        )
        self.extractor = Extractor

    def _get_output_path(self, path):
        from .file_utils import hash_url_to_filename

        # Path where we extract compressed archives
        # We extract in the cache dir, and get the extracted path name by hashing the original path"
        abs_path = os.path.abspath(path)
        return os.path.join(self.extract_dir, hash_url_to_filename(abs_path))

    def _do_extract(self, output_path, force_extract):
        return force_extract or (
            not os.path.isfile(output_path) and not (os.path.isdir(output_path) and os.listdir(output_path))
        )

    def extract(self, input_path, force_extract=False):
        extractor_format = self.extractor.infer_extractor_format(input_path)
        if not extractor_format:
            return input_path
        output_path = self._get_output_path(input_path)
        if self._do_extract(output_path, force_extract):
            self.extractor.extract(input_path, output_path, extractor_format)
        return output_path

class BaseExtractor(ABC):
    @classmethod
    @abstractmethod
    def is_extractable(cls, path, **kwargs):
        ...

    @staticmethod
    @abstractmethod
    def extract(input_path, output_path):
        ...

class MagicNumberBaseExtractor(BaseExtractor, ABC):
    magic_numbers: List[bytes] = []

    @staticmethod
    def read_magic_number(path, magic_number_length):
        with open(path, "rb") as f:
            return f.read(magic_number_length)

    @classmethod
    def is_extractable(cls, path, magic_number=b""):
        if not magic_number:
            magic_number_length = max(len(cls_magic_number) for cls_magic_number in cls.magic_numbers)
            try:
                magic_number = cls.read_magic_number(path, magic_number_length)
            except OSError:
                return False
        return any(magic_number.startswith(cls_magic_number) for cls_magic_number in cls.magic_numbers)

class TarExtractor(BaseExtractor):
    @classmethod
    def is_extractable(cls, path, **kwargs):
        return tarfile.is_tarfile(path)

    @staticmethod
    def safemembers(members, output_path):
        def resolved(path) -> str:
            return os.path.realpath(os.path.abspath(path))

        def badpath(path, base) -> bool:
            # joinpath will ignore base if path is absolute
            return not resolved(os.path.join(base, path)).startswith(base)

        def badlink(info, base) -> bool:
            # Links are interpreted relative to the directory containing the link
            tip = resolved(os.path.join(base, os.path.dirname(info.name)))
            return badpath(info.linkname, base=tip)

        base = resolved(output_path)

        for finfo in members:
            if badpath(finfo.name, base):
                logger.error(f"Extraction of {finfo.name} is blocked (illegal path)")
            elif finfo.issym() and badlink(finfo, base):
                logger.error(f"Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}")
            elif finfo.islnk() and badlink(finfo, base):
                logger.error(f"Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}")
            else:
                yield finfo

    @staticmethod
    def extract(input_path, output_path):
        os.makedirs(output_path, exist_ok=True)
        tar_file = tarfile.open(input_path)
        tar_file.extractall(output_path, members=TarExtractor.safemembers(tar_file, output_path))
        tar_file.close()

class GzipExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x1F\x8B"]

    @staticmethod
    def extract(input_path, output_path):
        with gzip.open(input_path, "rb") as gzip_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(gzip_file, extracted_file)

class ZipExtractor(MagicNumberBaseExtractor):
    magic_numbers = [
        b"PK\x03\x04",
        b"PK\x05\x06",  # empty archive
        b"PK\x07\x08",  # spanned archive
    ]

    @classmethod
    def is_extractable(cls, path, magic_number=b""):
        if super().is_extractable(path, magic_number=magic_number):
            return True
        try:
            # Alternative version of zipfile.is_zipfile that has less false positives, but misses executable zip archives.
            # From: https://github.com/python/cpython/pull/5053
            from zipfile import (
                _CD_SIGNATURE,
                _ECD_DISK_NUMBER,
                _ECD_DISK_START,
                _ECD_ENTRIES_TOTAL,
                _ECD_OFFSET,
                _ECD_SIZE,
                _EndRecData,
                sizeCentralDir,
                stringCentralDir,
                structCentralDir,
            )

            with open(path, "rb") as fp:
                endrec = _EndRecData(fp)
                if endrec:
                    if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0:
                        return True  # Empty zipfiles are still zipfiles
                    elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]:
                        fp.seek(endrec[_ECD_OFFSET])  # Central directory is on the same disk
                        if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir:
                            data = fp.read(sizeCentralDir)  # CD is where we expect it to be
                            if len(data) == sizeCentralDir:
                                centdir = struct.unpack(structCentralDir, data)  # CD is the right size
                                if centdir[_CD_SIGNATURE] == stringCentralDir:
                                    return True  # First central directory entry has correct magic number
            return False
        except Exception:  # catch all errors in case future python versions change the zipfile internals
            return False

    @staticmethod
    def extract(input_path, output_path):
        os.makedirs(output_path, exist_ok=True)
        with zipfile.ZipFile(input_path, "r") as zip_file:
            zip_file.extractall(output_path)
            zip_file.close()

class XzExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\xFD\x37\x7A\x58\x5A\x00"]

    @staticmethod
    def extract(input_path, output_path):
        with lzma.open(input_path) as compressed_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file)

class RarExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"Rar!\x1a\x07\x00", b"Rar!\x1a\x07\x01\x00"]  # RAR_ID  # RAR5_ID

    @staticmethod
    def extract(input_path, output_path):
        if not config.RARFILE_AVAILABLE:
            raise ImportError("Please pip install rarfile")
        import rarfile

        os.makedirs(output_path, exist_ok=True)
        rf = rarfile.RarFile(input_path)
        rf.extractall(output_path)
        rf.close()

class ZstdExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x28\xb5\x2F\xFD"]

    @staticmethod
    def extract(input_path, output_path):
        if not config.ZSTANDARD_AVAILABLE:
            raise ImportError("Please pip install zstandard")
        import zstandard as zstd

        dctx = zstd.ZstdDecompressor()
        with open(input_path, "rb") as ifh, open(output_path, "wb") as ofh:
            dctx.copy_stream(ifh, ofh)

class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : Any = [B"""\x42\x5A\x68"""]
@staticmethod
def UpperCamelCase__ ( __magic_name__ , __magic_name__ ):
with bza.open(__magic_name__ , """rb""" ) as compressed_file:
with open(__magic_name__ , """wb""" ) as extracted_file:
shutil.copyfileobj(__magic_name__ , __magic_name__ )
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : List[Any] = [B"""\x37\x7A\xBC\xAF\x27\x1C"""]
@staticmethod
def UpperCamelCase__ ( __magic_name__ , __magic_name__ ):
if not config.PY7ZR_AVAILABLE:
raise ImportError("""Please pip install py7zr""" )
import pyazr
os.makedirs(__magic_name__ , exist_ok=__magic_name__ )
with pyazr.SevenZipFile(__magic_name__ , """r""" ) as archive:
archive.extractall(__magic_name__ )
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : List[Any] = [B"""\x04\x22\x4D\x18"""]
@staticmethod
def UpperCamelCase__ ( __magic_name__ , __magic_name__ ):
if not config.LZ4_AVAILABLE:
raise ImportError("""Please pip install lz4""" )
import lza.frame
with lza.frame.open(__magic_name__ , """rb""" ) as compressed_file:
with open(__magic_name__ , """wb""" ) as extracted_file:
shutil.copyfileobj(__magic_name__ , __magic_name__ )
class Extractor:
    #  Put zip file to the last, b/c it is possible wrongly detected as zip (I guess it means: as tar or gzip)
    extractors: Dict[str, Type[BaseExtractor]] = {
        "tar": TarExtractor,
        "gzip": GzipExtractor,
        "zip": ZipExtractor,
        "xz": XzExtractor,
        "rar": RarExtractor,
        "zstd": ZstdExtractor,
        "bz2": BzipaExtractor,
        "7z": SevenZipExtractor,  # <Added version="2.4.0"/>
        "lz4": LzaExtractor,  # <Added version="2.4.0"/>
    }
    @classmethod
    def _get_magic_number_max_length(cls):
        return max(
            len(extractor_magic_number)
            for extractor in cls.extractors.values()
            if issubclass(extractor, MagicNumberBaseExtractor)
            for extractor_magic_number in extractor.magic_numbers
        )
    @staticmethod
    def _read_magic_number(path, magic_number_length):
        try:
            return MagicNumberBaseExtractor.read_magic_number(path, magic_number_length=magic_number_length)
        except OSError:
            return b""
    @classmethod
    def is_extractable(cls, path, return_extractor=False):
        warnings.warn(
            "Method 'is_extractable' was deprecated in version 2.4.0 and will be removed in 3.0.0. "
            "Use 'infer_extractor_format' instead.",
            category=FutureWarning,
        )
        extractor_format = cls.infer_extractor_format(path)
        if extractor_format:
            return True if not return_extractor else (True, cls.extractors[extractor_format])
        return False if not return_extractor else (False, None)
    @classmethod
    def infer_extractor_format(cls, path):  # <Added version="2.4.0"/>
        magic_number_max_length = cls._get_magic_number_max_length()
        magic_number = cls._read_magic_number(path, magic_number_max_length)
        for extractor_format, extractor in cls.extractors.items():
            if extractor.is_extractable(path, magic_number=magic_number):
                return extractor_format
    @classmethod
    def extract(cls, input_path, output_path, extractor_format=None, extractor="deprecated"):
        os.makedirs(os.path.dirname(output_path), exist_ok=True)
        # Prevent parallel extractions
        lock_path = str(Path(output_path).with_suffix(".lock"))
        with FileLock(lock_path):
            shutil.rmtree(output_path, ignore_errors=True)
            if extractor_format or extractor != "deprecated":
                if extractor != "deprecated" or not isinstance(extractor, str):  # passed as positional arg
                    warnings.warn(
                        "Parameter 'extractor' was deprecated in version 2.4.0 and will be removed in 3.0.0. "
                        "Use 'extractor_format' instead.",
                        category=FutureWarning,
                    )
                    extractor = extractor if extractor != "deprecated" else extractor_format
                else:
                    extractor = cls.extractors[extractor_format]
                return extractor.extract(input_path, output_path)
            else:
                warnings.warn(
                    "Parameter 'extractor_format' was made required in version 2.4.0 and not passing it will raise an "
                    "exception in 3.0.0.",
                    category=FutureWarning,
                )
                for extractor in cls.extractors.values():
                    if extractor.is_extractable(input_path):
                        return extractor.extract(input_path, output_path)
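# --- Illustrative usage sketch (not part of the original module) ---
# Extraction is a two-step call: infer the format from the file's magic number,
# then dispatch to the matching extractor. The paths below are hypothetical.
#
#   archive_path = "/tmp/data.tar.gz"
#   output_dir = "/tmp/extracted/data"
#   extractor_format = Extractor.infer_extractor_format(archive_path)  # e.g. "gzip"
#   if extractor_format:
#       Extractor.extract(archive_path, output_dir, extractor_format)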
| 287 | 0 |
import unittest
from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script
@require_cpu
class MultiCPUTester(unittest.TestCase):
    def test_cpu(self):
        debug_launcher(test_script.main)
    def test_ops(self):
        debug_launcher(test_ops.main) | 76 |
import json
import os
import tempfile
import transformers
import datasets
from utils import generate_example_dataset, get_duration
SPEED_TEST_N_EXAMPLES = 500_000
RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))
@get_duration
def map(dataset, **kwargs):  # intentionally shadows the built-in `map`, as in the benchmark script
    _ = dataset.map(**kwargs)
@get_duration
def filter(dataset, **kwargs):  # intentionally shadows the built-in `filter`
    _ = dataset.filter(**kwargs)
def benchmark_map_filter():
    times = {"num examples": SPEED_TEST_N_EXAMPLES}
    with tempfile.TemporaryDirectory() as tmp_dir:
        features = datasets.Features({"text": datasets.Value("string"), "numbers": datasets.Value("float32")})
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, "dataset.arrow"), features, num_examples=SPEED_TEST_N_EXAMPLES
        )
        tokenizer = transformers.AutoTokenizer.from_pretrained("bert-base-cased", use_fast=True)
        def tokenize(examples):
            return tokenizer(examples["text"])
        times["map identity"] = map(dataset)
        times["map identity batched"] = map(dataset, batched=True)
        times["map no-op batched"] = map(dataset, function=lambda x: None, batched=True)
        with dataset.formatted_as(type="numpy"):
            times["map no-op batched numpy"] = map(dataset, function=lambda x: None, batched=True)
        with dataset.formatted_as(type="pandas"):
            times["map no-op batched pandas"] = map(dataset, function=lambda x: None, batched=True)
        with dataset.formatted_as(type="torch", columns="numbers"):
            times["map no-op batched pytorch"] = map(dataset, function=lambda x: None, batched=True)
        with dataset.formatted_as(type="tensorflow", columns="numbers"):
            times["map no-op batched tensorflow"] = map(dataset, function=lambda x: None, batched=True)
        times["map fast-tokenizer batched"] = map(dataset, function=tokenize, batched=True)
        times["filter"] = filter(dataset)
        # Activate later when tokenizer support batched inputs
        # with dataset.formatted_as(type='numpy'):
        #     times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)
    with open(RESULTS_FILE_PATH, "wb") as f:
        f.write(json.dumps(times).encode("utf-8"))
if __name__ == "__main__":  # useful to run the profiler
    benchmark_map_filter()
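# For reference, a minimal sketch of what the `get_duration` decorator imported
# from the benchmark `utils` module could look like -- an assumption about its
# shape, not the actual implementation:
#
#   import functools
#   import timeit
#
#   def get_duration(func):
#       @functools.wraps(func)
#       def wrapper(*args, **kwargs):
#           starttime = timeit.default_timer()
#           func(*args, **kwargs)
#           return timeit.default_timer() - starttime  # seconds elapsed
#       return wrapper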
| 287 | 0 |
"""simple docstring"""
import torch
def main():
    if torch.cuda.is_available():
        num_gpus = torch.cuda.device_count()
    else:
        num_gpus = 0
    print(f"""Successfully ran on {num_gpus} GPUs""")
if __name__ == "__main__":
    main()
| 77 |
def lucas_lehmer_test(p: int) -> bool:
    if p < 2:
        raise ValueError("p should not be less than 2!")
    elif p == 2:
        return True
    s = 4
    m = (1 << p) - 1
    for _ in range(p - 2):
        s = ((s * s) - 2) % m
    return s == 0
if __name__ == "__main__":
    print(lucas_lehmer_test(7))
    print(lucas_lehmer_test(11))
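    # Quick demonstration (illustrative addition): for an odd prime p,
    # 2**p - 1 is a Mersenne prime exactly when the test returns True.
    for p in (3, 5, 7, 11, 13):
        print(p, (1 << p) - 1, lucas_lehmer_test(p))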
| 287 | 0 |
"""simple docstring"""
import os
from datetime import datetime as dt
from github import Github
LABELS_TO_EXEMPT = [
    "good first issue",
    "feature request",
    "wip",
]
def main():
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/accelerate")
    open_issues = repo.get_issues(state="open")
    for issue in open_issues:
        comments = sorted([comment for comment in issue.get_comments()], key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        current_time = dt.utcnow()
        days_since_updated = (current_time - issue.updated_at).days
        days_since_creation = (current_time - issue.created_at).days
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and days_since_updated > 7
and days_since_creation >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Close issue since it has been 7 days of inactivity since bot mention.
issue.edit(state='closed' )
elif (
days_since_updated > 23
and days_since_creation >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Add stale comment
issue.create_comment(
'This issue has been automatically marked as stale because it has not had '
'recent activity. If you think this still needs to be addressed '
'please comment on this thread.\n\nPlease note that issues that do not follow the '
'[contributing guidelines](https://github.com/huggingface/accelerate/blob/main/CONTRIBUTING.md) '
'are likely to be ignored.' )
if __name__ == "__main__":
main()
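# Note (illustrative): running this script requires a `GITHUB_TOKEN` environment
# variable with permission to read and edit issues on the target repository,
# e.g. `GITHUB_TOKEN=<token> python stale.py` (the file name here is assumed).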
| 78 |
import warnings
from ...utils import logging
from .image_processing_owlvit import OwlViTImageProcessor
logger = logging.get_logger(__name__)
class OwlViTFeatureExtractor(OwlViTImageProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use OwlViTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 287 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
'''configuration_swiftformer''': [
'''SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''SwiftFormerConfig''',
'''SwiftFormerOnnxConfig''',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_swiftformer"] = [
'''SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''SwiftFormerForImageClassification''',
'''SwiftFormerModel''',
'''SwiftFormerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_swiftformer import (
SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
SwiftFormerConfig,
SwiftFormerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swiftformer import (
SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SwiftFormerForImageClassification,
SwiftFormerModel,
SwiftFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
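# How the lazy pattern behaves (illustrative note): importing this package only
# records `_import_structure`; the heavy `modeling_swiftformer` submodule is
# imported the first time an attribute is resolved through `_LazyModule`, e.g.
#
#   from transformers import SwiftFormerModel  # triggers the real import here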
| 79 |
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
SEGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "nvidia/segformer-b0-finetuned-ade-512-512": (
        "https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json"
    ),
    # See all SegFormer models at https://huggingface.co/models?filter=segformer
}
class SegformerConfig(PretrainedConfig):
    model_type = "segformer"
    def __init__(
        self,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[32, 64, 160, 256],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        num_attention_heads=[1, 2, 5, 8],
        mlp_ratios=[4, 4, 4, 4],
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        drop_path_rate=0.1,
        layer_norm_eps=1e-6,
        decoder_hidden_size=256,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
            warnings.warn(
                "Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be"
                " removed, as the behaviour will default to that of reshape_last_stage = True.",
                FutureWarning,
            )
        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.reshape_last_stage = kwargs.get("reshape_last_stage", True)
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class SegformerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )
    @property
    def atol_for_validation(self) -> float:
        return 1e-4
    @property
    def default_onnx_opset(self) -> int:
        return 12
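# Minimal usage sketch (illustrative, not in the original file): build a default
# config and inspect the per-block values that drive the hierarchical encoder.
#
#   config = SegformerConfig()
#   for block, (depth, size, ratio) in enumerate(zip(config.depths, config.hidden_sizes, config.sr_ratios)):
#       print(f"block {block}: depth={depth}, hidden_size={size}, sr_ratio={ratio}")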
| 287 | 0 |
'''simple docstring'''
from string import ascii_uppercase
dict1 = {char: i for i, char in enumerate(ascii_uppercase)}
dict2 = dict(enumerate(ascii_uppercase))
def generate_key(message: str, key: str) -> str:
    x = len(message)
    i = 0
    while True:
        if x == i:
            i = 0
        if len(key) == len(message):
            break
        key += key[i]
        i += 1
    return key
def cipher_text(message: str, key_new: str) -> str:
    cipher_text = ""
    i = 0
    for letter in message:
        if letter == " ":
            cipher_text += " "
        else:
            x = (dict1[letter] - dict1[key_new[i]]) % 26
            i += 1
            cipher_text += dict2[x]
    return cipher_text
def original_text(cipher_text: str, key_new: str) -> str:
    or_txt = ""
    i = 0
    for letter in cipher_text:
        if letter == " ":
            or_txt += " "
        else:
            x = (dict1[letter] + dict1[key_new[i]] + 26) % 26
            i += 1
            or_txt += dict2[x]
    return or_txt
def main() -> None:
    message = "THE GERMAN ATTACK"
    key = "SECRET"
    key_new = generate_key(message, key)
    s = cipher_text(message, key_new)
    print(f"Encrypted Text = {s}")
    print(f"Original Text = {original_text(s, key_new)}")
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    main()
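# Worked example (illustrative): with key "SECRET", the first letter of
# "THE GERMAN ATTACK" encrypts as (dict1["T"] - dict1["S"]) % 26
# = (19 - 18) % 26 = 1, and dict2[1] == "B". Decryption inverts it:
# (dict1["B"] + dict1["S"] + 26) % 26 = (1 + 18 + 26) % 26 = 19 -> "T".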
| 80 |
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging
logger = logging.get_logger(__name__)
GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "EleutherAI/gpt-neo-1.3B": "https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json",
    # See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}
class GPTNeoConfig(PretrainedConfig):
    model_type = "gpt_neo"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}
    def __init__(
        self,
        vocab_size=50257,
        max_position_embeddings=2048,
        hidden_size=2048,
        num_layers=24,
        attention_types=[[["global", "local"], 12]],
        num_heads=16,
        intermediate_size=None,
        window_size=256,
        activation_function="gelu_new",
        resid_dropout=0.0,
        embed_dropout=0.0,
        attention_dropout=0.0,
        classifier_dropout=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.intermediate_size = intermediate_size
        self.window_size = window_size
        self.activation_function = activation_function
        self.resid_dropout = resid_dropout
        self.embed_dropout = embed_dropout
        self.attention_dropout = attention_dropout
        self.classifier_dropout = classifier_dropout
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.attention_types = attention_types
        self.attention_layers = self.expand_attention_types_params(attention_types)
        if len(self.attention_layers) != self.num_layers:
            raise ValueError(
                "Configuration for convolutional module is incorrect. "
                "It is required that `len(config.attention_layers)` == `config.num_layers` "
                f"but is `len(config.attention_layers) = {len(self.attention_layers)}`, "
                f"`config.num_layers = {self.num_layers}`. "
                "`config.attention_layers` is prepared using `config.attention_types`. "
                "Please verify the value of `config.attention_types` argument."
            )
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
    @staticmethod
    def expand_attention_types_params(attention_types):
        attentions = []
        for item in attention_types:
            for _ in range(item[1]):
                attentions.extend(item[0])
        return attentions
def custom_unfold(input, dimension, size, step):
    """Custom torch.Tensor.unfold implementation to enable the export to ONNX."""
    import torch
    shape = input.size()
    rank = len(shape)
    sizedim = shape[dimension]
    low_indices = torch.arange(0, sizedim, step)
    min_length = torch.div(sizedim - size, step, rounding_mode="floor") + 1
    indices = torch.arange(size) + low_indices[:min_length][:, None]
    s = [slice(None)] * rank
    s[dimension] = indices
    sliced = input[s]
    perm = list(range(0, rank + 1))
    perm.append(perm.pop(dimension + 1))
    return sliced.permute(perm)
def custom_get_block_length_and_num_blocks(seq_length, window_size):
    """Custom implementation to compute block length and number of blocks, written so it can be exported to ONNX."""
    import torch
    candidates = torch.arange(1, window_size)
    remainders = torch.remainder(seq_length, candidates)
    divisor_indices = remainders == 0
    divisors = candidates[divisor_indices]
    largest_divisor = torch.max(divisors)
    return largest_divisor, torch.div(seq_length, largest_divisor, rounding_mode="floor")
class GPTNeoOnnxConfig(OnnxConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}
        return common_inputs
    @property
    def num_attention_heads(self) -> int:
        return self._config.num_heads
    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )
        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})
        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]
        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
        return ordered_inputs
    @property
    def default_onnx_opset(self) -> int:
        return 13
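# Sanity-check sketch (illustrative, not in the original file): on eager tensors,
# `custom_unfold` is meant to reproduce `torch.Tensor.unfold`:
#
#   x = torch.arange(10).reshape(1, 10)
#   assert torch.equal(custom_unfold(x, dimension=1, size=4, step=2), x.unfold(1, 4, 2))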
| 287 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"""configuration_chinese_clip""": [
"""CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""ChineseCLIPConfig""",
"""ChineseCLIPOnnxConfig""",
"""ChineseCLIPTextConfig""",
"""ChineseCLIPVisionConfig""",
],
"""processing_chinese_clip""": ["""ChineseCLIPProcessor"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_chinese_clip"] = ["ChineseCLIPFeatureExtractor"]
    _import_structure["image_processing_chinese_clip"] = ["ChineseCLIPImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_chinese_clip"] = [
"""CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ChineseCLIPModel""",
"""ChineseCLIPPreTrainedModel""",
"""ChineseCLIPTextModel""",
"""ChineseCLIPVisionModel""",
]
if TYPE_CHECKING:
from .configuration_chinese_clip import (
CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
ChineseCLIPConfig,
ChineseCLIPOnnxConfig,
ChineseCLIPTextConfig,
ChineseCLIPVisionConfig,
)
from .processing_chinese_clip import ChineseCLIPProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_chinese_clip import ChineseCLIPFeatureExtractor, ChineseCLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_chinese_clip import (
CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
ChineseCLIPModel,
ChineseCLIPPreTrainedModel,
ChineseCLIPTextModel,
ChineseCLIPVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) | 81 |
from . import __version__
# Backward compatibility imports, to make sure all those objects can be found in file_utils
from .utils import (
CLOUDFRONT_DISTRIB_PREFIX,
CONFIG_NAME,
DISABLE_TELEMETRY,
DUMMY_INPUTS,
DUMMY_MASK,
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
FEATURE_EXTRACTOR_NAME,
FLAX_WEIGHTS_NAME,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
MODEL_CARD_NAME,
MULTIPLE_CHOICE_DUMMY_INPUTS,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
SENTENCEPIECE_UNDERLINE,
SPIECE_UNDERLINE,
TF2_WEIGHTS_NAME,
TF_WEIGHTS_NAME,
TORCH_FX_REQUIRED_VERSION,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
USE_JAX,
USE_TF,
USE_TORCH,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
ContextManagers,
DummyObject,
EntryNotFoundError,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
TensorType,
_LazyModule,
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
cached_property,
copy_func,
default_cache_path,
define_sagemaker_information,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
get_torch_version,
has_file,
http_user_agent,
is_apex_available,
    is_bs4_available,
is_coloredlogs_available,
is_datasets_available,
    is_detectron2_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_librosa_available,
is_offline_mode,
is_onnx_available,
is_pandas_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
    is_py3nvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_tensor,
is_tensorflow_probability_available,
    is_tf2onnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
    is_torch_bf16_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
    is_torch_tf32_available,
is_torch_tpu_available,
is_torchaudio_available,
is_training_run_on_sagemaker,
is_vision_available,
replace_return_docstrings,
requires_backends,
to_numpy,
to_py_obj,
torch_only_method,
)
| 287 | 0 |
import pytest
from datasets.splits import SplitDict, SplitInfo
from datasets.utils.py_utils import asdict
@pytest.mark.parametrize(
"""split_dict""" , [
SplitDict(),
SplitDict({"""train""": SplitInfo(name="""train""" , num_bytes=13_37 , num_examples=42 , dataset_name="""my_dataset""" )} ),
SplitDict({"""train""": SplitInfo(name="""train""" , num_bytes=13_37 , num_examples=42 )} ),
SplitDict({"""train""": SplitInfo()} ),
] , )
def test_split_dict_to_yaml_list(split_dict: SplitDict):
    split_dict_yaml_list = split_dict._to_yaml_list()
    assert len(split_dict_yaml_list) == len(split_dict)
    reloaded = SplitDict._from_yaml_list(split_dict_yaml_list)
    for split_name, split_info in split_dict.items():
        # dataset_name field is deprecated, and is therefore not part of the YAML dump
        split_info.dataset_name = None
        # the split name of split_dict takes over the name of the split info object
        split_info.name = split_name
    assert split_dict == reloaded
@pytest.mark.parametrize(
    "split_info", [SplitInfo(), SplitInfo(dataset_name=None), SplitInfo(dataset_name="my_dataset")]
)
def test_split_dict_asdict_has_dataset_name(split_info):
    split_dict_asdict = asdict(SplitDict({"train": split_info}))
    assert "dataset_name" in split_dict_asdict["train"]
    assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
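# Round-trip sketch (illustrative): the YAML list form drops the deprecated
# `dataset_name` field, which is why the first test resets it before comparing:
#
#   split_dict = SplitDict({"train": SplitInfo(name="train", num_bytes=1337, num_examples=42)})
#   reloaded = SplitDict._from_yaml_list(split_dict._to_yaml_list())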
| 82 |
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def check_model_parameters(model_a, model_b, did_step, iteration):
    for param, grad_param in zip(model_a.parameters(), model_b.parameters()):
        if not param.requires_grad:
            continue
        if not did_step:
            # Grads should not be in sync
            assert (
                torch.allclose(param.grad, grad_param.grad) is False
            ), f"Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})"
        else:
            # Grads should be in sync
            assert (
                torch.allclose(param.grad, grad_param.grad) is True
            ), f"Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})"
def step_model(model, input, target, accelerator, do_backward=True):
    model.train()
    output = model(input)
    loss = F.mse_loss(output, target.to(output.device))
    if not do_backward:
        loss /= accelerator.gradient_accumulation_steps
        loss.backward()
    else:
        accelerator.backward(loss)
def get_training_setup(accelerator, sched=False):
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=80)
    dataloader = DataLoader(dset, batch_size=16)
    model.to(accelerator.device)
    if sched:
        opt = AdamW(params=model.parameters(), lr=1e-3)
        ddp_opt = AdamW(params=ddp_model.parameters(), lr=1e-3)
        sched = LambdaLR(opt, lr_lambda=lambda epoch: epoch**0.65)
        ddp_sched = LambdaLR(ddp_opt, lr_lambda=lambda epoch: epoch**0.65)
    # Make a copy of `model`
    if sched:
        ddp_model, ddp_opt, ddp_sched, dataloader = accelerator.prepare(ddp_model, ddp_opt, ddp_sched, dataloader)
    else:
        ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    if sched:
        return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
    return model, ddp_model, dataloader
def test_noop_sync(accelerator):
    # Test when on a single CPU or GPU that the context manager does nothing
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
        # Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
        check_model_parameters(model, ddp_model, True, iteration)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            assert torch.allclose(
                param.grad, ddp_param.grad
            ), f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
def test_distributed_sync(accelerator):
    # Test on distributed setup that context manager behaves properly
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if iteration % 2 == 0:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), f"Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"
            else:
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
def test_gradient_accumulation(split_batches=False, dispatch_batches=False):
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2
    )
    # Test that context manager behaves properly
    model, ddp_model, dataloader = get_training_setup(accelerator)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator, False)
        # Do "gradient accumulation" (noop)
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if ((iteration + 1) % 2 == 0) or (iteration == len(dataloader) - 1):
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), f"Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"
            else:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), f"Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
    GradientState._reset_state()
def test_gradient_accumulation_with_opt_and_scheduler(split_batches=False, dispatch_batches=False):
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2
    )
    # Test that context manager behaves properly
    model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched = get_training_setup(accelerator, True)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        model.train()
        ddp_model.train()
        step_model(model, input, target, accelerator, False)
        opt.step()
        if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(dataloader)):
            if split_batches:
                sched.step()
            else:
                for _ in range(accelerator.num_processes):
                    sched.step()
        opt.zero_grad()
        # Perform gradient accumulation under wrapper
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
            ddp_opt.step()
            ddp_sched.step()
            ddp_opt.zero_grad()
        # Learning rates should be the same
        assert (
            opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
        ), f'Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]["lr"]}\nDDP opt: {ddp_opt.param_groups[0]["lr"]}\n'
        did_step = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(dataloader))
        if accelerator.num_processes > 1:
            check_model_parameters(model, ddp_model, did_step, iteration)
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
    GradientState._reset_state()
def test_dataloader_break():
    accelerator = Accelerator()
    first_dset = RegressionDataset(length=80)
    first_dataloader = DataLoader(first_dset, batch_size=16)
    second_dset = RegressionDataset(length=96)
    second_dataloader = DataLoader(second_dset, batch_size=16)
    first_dataloader, second_dataloader = accelerator.prepare(first_dataloader, second_dataloader)
    assert accelerator.gradient_state.active_dataloader is None
    for iteration, _ in enumerate(first_dataloader):
        assert id(accelerator.gradient_state.active_dataloader) == id(first_dataloader)
        if iteration < len(first_dataloader) - 1:
            assert not accelerator.gradient_state.end_of_dataloader
            if iteration == 1:
                for batch_num, _ in enumerate(second_dataloader):
                    assert id(accelerator.gradient_state.active_dataloader) == id(second_dataloader)
                    if batch_num < len(second_dataloader) - 1:
                        assert not accelerator.gradient_state.end_of_dataloader
                    else:
                        assert accelerator.gradient_state.end_of_dataloader
        else:
            assert accelerator.gradient_state.end_of_dataloader
    assert accelerator.gradient_state.active_dataloader is None
def main():
    accelerator = Accelerator()
    state = accelerator.state
    if state.local_process_index == 0:
        print("**Test `accumulate` gradient accumulation with dataloader break**")
    test_dataloader_break()
    if state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print("**Test NOOP `no_sync` context manager**")
        test_noop_sync(accelerator)
    if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
        if state.local_process_index == 0:
            print("**Test Distributed `no_sync` context manager**")
        test_distributed_sync(accelerator)
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if state.local_process_index == 0:
                    print(
                        "**Test `accumulate` gradient accumulation, ",
                        f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**",
                    )
                test_gradient_accumulation(split_batch, dispatch_batches)
    # Currently will break on torch 2.0 +, need to investigate why
    if is_torch_version("<", "2.0") or state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print(
                "**Test `accumulate` gradient accumulation with optimizer and scheduler, ",
                "`split_batches=False`, `dispatch_batches=False`**",
            )
        test_gradient_accumulation_with_opt_and_scheduler()
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if not split_batch and not dispatch_batches:
                    continue
                if state.local_process_index == 0:
                    print(
                        "**Test `accumulate` gradient accumulation with optimizer and scheduler, ",
                        f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**",
                    )
                test_gradient_accumulation_with_opt_and_scheduler(split_batch, dispatch_batches)
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
    main()
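# For reference (illustrative): on a multi-process setup, `accelerator.no_sync(model)`
# defers the gradient all-reduce for backward passes run inside it (wrapping DDP's
# own `no_sync`), while on a single process it is a no-op -- which is exactly the
# behaviour `test_noop_sync` and `test_distributed_sync` assert above, e.g.
#
#   with accelerator.no_sync(ddp_model):
#       step_model(ddp_model, ddp_input, ddp_target, accelerator)  # grads stay local
#   step_model(ddp_model, ddp_input, ddp_target, accelerator)      # grads sync here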
| 287 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
_import_structure = {
'configuration_trocr': ['TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TrOCRConfig'],
'processing_trocr': ['TrOCRProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_trocr"] = [
'TROCR_PRETRAINED_MODEL_ARCHIVE_LIST',
'TrOCRForCausalLM',
'TrOCRPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 83 |
from scipy.stats import pearsonr
import datasets
_lowerCamelCase ="""
Pearson correlation coefficient and p-value for testing non-correlation.
The Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.
The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.
"""
_lowerCamelCase ="""
Args:
predictions (`list` of `int`): Predicted class labels, as returned by a model.
references (`list` of `int`): Ground truth labels.
return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.
Returns:
pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.
p-value (`float`): P-value, which roughly indicates the probability of an The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.
Examples:
Example 1-A simple example using only predictions and references.
>>> pearsonr_metric = datasets.load_metric(\"pearsonr\")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])
>>> print(round(results['pearsonr'], 2))
-0.74
Example 2-The same as Example 1, but that also returns the `p-value`.
>>> pearsonr_metric = datasets.load_metric(\"pearsonr\")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)
>>> print(sorted(list(results.keys())))
['p-value', 'pearsonr']
>>> print(round(results['pearsonr'], 2))
-0.74
>>> print(round(results['p-value'], 2))
0.15
"""
_lowerCamelCase ="""
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, Ilhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Antonio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Pearsonr(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("float"),
                    "references": datasets.Value("float"),
                }
            ),
            reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html"],
        )
    def _compute(self, predictions, references, return_pvalue=False):
        if return_pvalue:
            results = pearsonr(references, predictions)
            return {"pearsonr": results[0], "p-value": results[1]}
        else:
            return {"pearsonr": float(pearsonr(references, predictions)[0])}
| 287 | 0 |
"""simple docstring"""
from statistics import mean
import numpy as np
def calculate_turn_around_time(
    process_name: list, arrival_time: list, burst_time: list, no_of_process: int
) -> list:
    current_time = 0
    # Number of processes finished
    finished_process_count = 0
    # Displays the finished process.
    # If it is 0, the performance is completed if it is 1, before the performance.
    finished_process = [0] * no_of_process
    # List to include calculation results
    turn_around_time = [0] * no_of_process
    # Sort by arrival time.
    burst_time = [burst_time[i] for i in np.argsort(arrival_time)]
    process_name = [process_name[i] for i in np.argsort(arrival_time)]
    arrival_time.sort()
    while no_of_process > finished_process_count:
        i = 0
        while finished_process[i] == 1:
            i += 1
        if current_time < arrival_time[i]:
            current_time = arrival_time[i]
        response_ratio = 0
        # Index showing the location of the process being performed
        loc = 0
        # Saves the current response ratio.
        temp = 0
        for i in range(0, no_of_process):
            if finished_process[i] == 0 and arrival_time[i] <= current_time:
                temp = (burst_time[i] + (current_time - arrival_time[i])) / burst_time[i]
            if response_ratio < temp:
                response_ratio = temp
                loc = i
        # Calculate the turn around time
        turn_around_time[loc] = current_time + burst_time[loc] - arrival_time[loc]
        current_time += burst_time[loc]
        # Indicates that the process has been performed.
        finished_process[loc] = 1
        # Increase finished_process_count by 1
        finished_process_count += 1
    return turn_around_time
def calculate_waiting_time(process_name: list, turn_around_time: list, burst_time: list, no_of_process: int) -> list:
    waiting_time = [0] * no_of_process
    for i in range(0, no_of_process):
        waiting_time[i] = turn_around_time[i] - burst_time[i]
    return waiting_time
if __name__ == "__main__":
    no_of_process = 5
    process_name = ['A', 'B', 'C', 'D', 'E']
    arrival_time = [1, 2, 3, 4, 5]
    burst_time = [1, 2, 3, 4, 5]
    turn_around_time = calculate_turn_around_time(
        process_name, arrival_time, burst_time, no_of_process
    )
    waiting_time = calculate_waiting_time(
        process_name, turn_around_time, burst_time, no_of_process
    )
print('Process name \tArrival time \tBurst time \tTurn around time \tWaiting time')
for i in range(0, no_of_process):
print(
F"""{process_name[i]}\t\t{arrival_time[i]}\t\t{burst_time[i]}\t\t"""
F"""{turn_around_time[i]}\t\t\t{waiting_time[i]}"""
)
print(F"""average waiting time : {mean(waiting_time):.5f}""")
print(F"""average turn around time : {mean(turn_around_time):.5f}""")
| 84 |
import copy
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/conditional-detr-resnet-50": (
        "https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json"
    ),
}
class ConditionalDetrConfig(PretrainedConfig):
    model_type = "conditional_detr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }
    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=300,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=2,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        cls_loss_coefficient=2,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        focal_alpha=0.25,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")
        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.cls_loss_coefficient = cls_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads
    @property
    def hidden_size(self) -> int:
        return self.d_model
    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
class ConditionalDetrOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )
    @property
    def atol_for_validation(self) -> float:
        return 1e-5
    @property
    def default_onnx_opset(self) -> int:
        return 12
| 287 | 0 |
'''simple docstring'''
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def load_checkpoint(checkpoint_path):
    '''simple docstring'''
    sd = torch.load(checkpoint_path, map_location="cpu")
    if "model" in sd.keys():
        sd = torch.load(checkpoint_path, map_location="cpu")["model"]
    # pop unnecessary weights
    keys_to_delete = [
        "decoder.version",
        "decoder.output_projection.weight",
    ]
    for key in keys_to_delete:
        if key in sd:
            sd.pop(key)
    keys_to_rename = {
        "decoder.project_in_dim.weight": "decoder.project_in.weight",
        "decoder.project_out_dim.weight": "decoder.project_out.weight",
        "decoder.layer_norm.weight": "decoder.final_layer_norm.weight",
        "decoder.layer_norm.bias": "decoder.final_layer_norm.bias",
    }
    for old_key, new_key in keys_to_rename.items():
        if old_key in sd:
            sd[new_key] = sd.pop(old_key)
    keys = list(sd.keys())
    for key in keys:
        if ".qkv_proj." in key:
            value = sd[key]
            # We split QKV in separate Q,K,V
            q_name = key.replace(".qkv_proj.", ".q_proj.")
            k_name = key.replace(".qkv_proj.", ".k_proj.")
            v_name = key.replace(".qkv_proj.", ".v_proj.")
            depth = value.shape[0]
            assert depth % 3 == 0
            # `SequeuceParallelTransformerBlock` has QKV weight is separated in K,V,Q despite the naming:
            # https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
            k, v, q = torch.split(value, depth // 3, dim=0)
            sd[q_name] = q
            sd[k_name] = k
            sd[v_name] = v
            del sd[key]
    return sd
@torch.no_grad()
def convert_opt_checkpoint(checkpoint_path, pytorch_dump_folder_path, config=None):
    '''simple docstring'''
    state_dict = load_checkpoint(checkpoint_path)
    if config is not None:
        config = OPTConfig.from_pretrained(config)
    else:
        config = OPTConfig()
    model = OPTModel(config).half().eval()
    model.load_state_dict(state_dict)
    # Check results
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--fairseq_path",
type=str,
help=(
"path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:"
" https://huggingface.co/models?other=opt_metasq"
),
)
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--hf_config", default=None, type=str, help="Define HF config.")
    args = parser.parse_args()
convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
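# Shape sketch of the QKV split above (illustrative): a fused projection weight of
# shape (3 * hidden, hidden) is cut into three (hidden, hidden) blocks along dim 0,
# stored in K, V, Q order by metaseq despite the q/k/v naming:
#
#   value = torch.randn(3 * 16, 16)  # hypothetical fused weight
#   k, v, q = torch.split(value, value.shape[0] // 3, dim=0)
#   assert k.shape == v.shape == q.shape == (16, 16)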
| 85 |
import json
import sys
def format_json_to_md(input_json_file, output_md_file):
    with open(input_json_file, encoding="utf-8") as f:
        results = json.load(f)
    output_md = ["<details>", "<summary>Show updated benchmarks!</summary>", " "]
    for benchmark_name in sorted(results):
        benchmark_res = results[benchmark_name]
        benchmark_file_name = benchmark_name.split("/")[-1]
        output_md.append(f"### Benchmark: {benchmark_file_name}")
        title = "| metric |"
        lines = "|--------|"
        value = "| new / old (diff) |"
        for metric_name in sorted(benchmark_res):
            metric_vals = benchmark_res[metric_name]
            new_val = metric_vals["new"]
            old_val = metric_vals.get("old", None)
            dif_val = metric_vals.get("diff", None)
            val_str = f" {new_val:f}" if isinstance(new_val, (int, float)) else "None"
            if old_val is not None:
                val_str += f" / {old_val:f}" if isinstance(old_val, (int, float)) else "None"
            if dif_val is not None:
                val_str += f" ({dif_val:f})" if isinstance(dif_val, (int, float)) else "None"
            title += " " + metric_name + " |"
            lines += "---|"
            value += val_str + " |"
        output_md += [title, lines, value, " "]
    output_md.append("</details>")
    with open(output_md_file, "w", encoding="utf-8") as f:
        f.writelines("\n".join(output_md))
if __name__ == "__main__":
    input_json_file = sys.argv[1]
    output_md_file = sys.argv[2]

    format_json_to_md(input_json_file, output_md_file)
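    # Shape of the expected input, for illustration (file name and numbers are
    # hypothetical): each benchmark maps metric names to new/old/diff entries,
    #   {"benchmarks/inference_speed.json":
    #       {"time_per_batch_s": {"new": 0.123, "old": 0.150, "diff": -0.027}}}
    # and each benchmark is rendered as one small Markdown table.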
| 287 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
    "configuration_data2vec_audio": ["DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP", "Data2VecAudioConfig"],
    "configuration_data2vec_text": [
        "DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Data2VecTextConfig",
        "Data2VecTextOnnxConfig",
    ],
    "configuration_data2vec_vision": [
        "DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Data2VecVisionConfig",
        "Data2VecVisionOnnxConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_data2vec_audio"] = [
        "DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Data2VecAudioForAudioFrameClassification",
        "Data2VecAudioForCTC",
        "Data2VecAudioForSequenceClassification",
        "Data2VecAudioForXVector",
        "Data2VecAudioModel",
        "Data2VecAudioPreTrainedModel",
    ]
    _import_structure["modeling_data2vec_text"] = [
        "DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Data2VecTextForCausalLM",
        "Data2VecTextForMaskedLM",
        "Data2VecTextForMultipleChoice",
        "Data2VecTextForQuestionAnswering",
        "Data2VecTextForSequenceClassification",
        "Data2VecTextForTokenClassification",
        "Data2VecTextModel",
        "Data2VecTextPreTrainedModel",
    ]
    _import_structure["modeling_data2vec_vision"] = [
        "DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Data2VecVisionForImageClassification",
        "Data2VecVisionForMaskedImageModeling",
        "Data2VecVisionForSemanticSegmentation",
        "Data2VecVisionModel",
        "Data2VecVisionPreTrainedModel",
    ]

if is_tf_available():
    _import_structure["modeling_tf_data2vec_vision"] = [
        "TFData2VecVisionForImageClassification",
        "TFData2VecVisionForSemanticSegmentation",
        "TFData2VecVisionModel",
        "TFData2VecVisionPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_data2vec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, Data2VecAudioConfig
    from .configuration_data2vec_text import (
        DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecTextConfig,
        Data2VecTextOnnxConfig,
    )
    from .configuration_data2vec_vision import (
        DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecVisionConfig,
        Data2VecVisionOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_data2vec_audio import (
            DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecAudioForAudioFrameClassification,
            Data2VecAudioForCTC,
            Data2VecAudioForSequenceClassification,
            Data2VecAudioForXVector,
            Data2VecAudioModel,
            Data2VecAudioPreTrainedModel,
        )
        from .modeling_data2vec_text import (
            DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecTextForCausalLM,
            Data2VecTextForMaskedLM,
            Data2VecTextForMultipleChoice,
            Data2VecTextForQuestionAnswering,
            Data2VecTextForSequenceClassification,
            Data2VecTextForTokenClassification,
            Data2VecTextModel,
            Data2VecTextPreTrainedModel,
        )
        from .modeling_data2vec_vision import (
            DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecVisionForImageClassification,
            Data2VecVisionForMaskedImageModeling,
            Data2VecVisionForSemanticSegmentation,
            Data2VecVisionModel,
            Data2VecVisionPreTrainedModel,
        )

    if is_tf_available():
        from .modeling_tf_data2vec_vision import (
            TFData2VecVisionForImageClassification,
            TFData2VecVisionForSemanticSegmentation,
            TFData2VecVisionModel,
            TFData2VecVisionPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) | 86 |
def reverse_long_words(sentence: str) -> str:
    """
    Reverse all words that are longer than 4 characters in a sentence.

    >>> reverse_long_words("Hey wollef sroirraw")
    'Hey fellow warriors'
    """
    return " ".join(
        "".join(word[::-1]) if len(word) > 4 else word for word in sentence.split()
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(reverse_long_words("""Hey wollef sroirraw"""))
| 287 | 0 |
import inspect
import unittest
from transformers import MobileViTVaConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
    import torch

    from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel
    from transformers.models.mobilevitva.modeling_mobilevitva import (
        MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST,
        make_divisible,
    )

if is_vision_available():
    from PIL import Image

    from transformers import MobileViTImageProcessor
class MobileViTVaConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "width_multiplier"))


class MobileViTVaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        patch_size=2,
        num_channels=3,
        hidden_act="swish",
        conv_kernel_size=3,
        output_stride=32,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        use_labels=True,
        is_training=True,
        num_labels=10,
        scope=None,
        width_multiplier=0.25,
        ffn_dropout=0.0,
        attn_dropout=0.0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.last_hidden_size = make_divisible(512 * width_multiplier, divisor=8)
        self.hidden_act = hidden_act
        self.conv_kernel_size = conv_kernel_size
        self.output_stride = output_stride
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope
        self.width_multiplier = width_multiplier
        self.ffn_dropout = ffn_dropout
        self.attn_dropout = attn_dropout

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return MobileViTVaConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_act=self.hidden_act,
            conv_kernel_size=self.conv_kernel_size,
            output_stride=self.output_stride,
            classifier_dropout_prob=self.classifier_dropout_prob,
            initializer_range=self.initializer_range,
            width_multiplier=self.width_multiplier,
            ffn_dropout=self.ffn_dropout,
            attn_dropout=self.attn_dropout,
        )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileViTVaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTVaForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTVaForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class MobileViTVaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileViTVaModel,
            "image-classification": MobileViTVaForImageClassification,
            "image-segmentation": MobileViTVaForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = MobileViTVaModelTester(self)
        self.config_tester = MobileViTVaConfigTester(self, config_class=MobileViTVaConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="MobileViTV2 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MobileViTV2 does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MobileViTV2 does not output attentions")
    def test_attention_outputs(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(reason="Got `CUDA error: misaligned address` for tests after this one being run.")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states
            expected_num_stages = 5
            self.assertEqual(len(hidden_states), expected_num_stages)

            # MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width)
            # with the width and height being successively divided by 2.
            divisor = 2
            for i in range(len(hidden_states)):
                self.assertListEqual(
                    list(hidden_states[i].shape[-2:]),
                    [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor],
                )
                divisor *= 2

            self.assertEqual(self.model_tester.output_stride, divisor // 2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileViTVaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class MobileViTVaModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            MobileViTImageProcessor.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256")
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = MobileViTVaForImageClassification.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256").to(
            torch_device
        )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.6336e00, -7.3204e-02, -5.1883e-01]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_semantic_segmentation(self):
        model = MobileViTVaForSemanticSegmentation.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")
        model = model.to(torch_device)

        image_processor = MobileViTImageProcessor.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 21, 32, 32))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[7.0863, 7.1525, 6.8201], [6.6931, 6.8770, 6.8933], [6.2978, 7.0366, 6.9636]],
                [[-3.7134, -3.6712, -3.6675], [-3.5825, -3.3549, -3.4777], [-3.3435, -3.3979, -3.2857]],
                [[-2.9329, -2.8003, -2.7369], [-3.0564, -2.4780, -2.0207], [-2.6889, -1.9298, -1.7640]],
            ],
            device=torch_device,
        )
        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_post_processing_semantic_segmentation(self):
        model = MobileViTVaForSemanticSegmentation.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")
        model = model.to(torch_device)

        image_processor = MobileViTImageProcessor.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        outputs.logits = outputs.logits.detach().cpu()

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(50, 60)])
        expected_shape = torch.Size((50, 60))
        self.assertEqual(segmentation[0].shape, expected_shape)

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((32, 32))
        self.assertEqual(segmentation[0].shape, expected_shape)
| 87 |
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from packaging import version
from .. import __version__
from .constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD
from .doc import (
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
copy_func,
replace_return_docstrings,
)
from .generic import (
ContextManagers,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
TensorType,
add_model_info_to_auto_map,
cached_property,
can_return_loss,
expand_dims,
find_labels,
flatten_dict,
infer_framework,
is_jax_tensor,
is_numpy_array,
is_tensor,
is_tf_symbolic_tensor,
is_tf_tensor,
is_torch_device,
is_torch_dtype,
is_torch_tensor,
reshape,
squeeze,
strtobool,
tensor_size,
to_numpy,
to_py_obj,
transpose,
working_or_temp_dir,
)
from .hub import (
CLOUDFRONT_DISTRIB_PREFIX,
DISABLE_TELEMETRY,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
EntryNotFoundError,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
cached_file,
default_cache_path,
define_sagemaker_information,
download_url,
extract_commit_hash,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
has_file,
http_user_agent,
is_offline_mode,
is_remote_url,
move_cache,
send_example_telemetry,
try_to_load_from_cache,
)
from .import_utils import (
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
TORCH_FX_REQUIRED_VERSION,
USE_JAX,
USE_TF,
USE_TORCH,
DummyObject,
OptionalDependencyNotAvailable,
_LazyModule,
ccl_version,
direct_transformers_import,
get_torch_version,
is_accelerate_available,
is_apex_available,
is_bitsandbytes_available,
    is_bs4_available,
is_coloredlogs_available,
is_cython_available,
is_datasets_available,
is_decord_available,
    is_detectron2_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_jieba_available,
is_jumanpp_available,
is_kenlm_available,
is_keras_nlp_available,
is_librosa_available,
is_natten_available,
is_ninja_available,
is_onnx_available,
is_openai_available,
is_optimum_available,
is_pandas_available,
is_peft_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
    is_py3nvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytest_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sacremoses_available,
is_safetensors_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_sudachi_available,
is_tensorflow_probability_available,
is_tensorflow_text_available,
    is_tf2onnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
    is_torch_bf16_available,
    is_torch_bf16_cpu_available,
    is_torch_bf16_gpu_available,
is_torch_compile_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_neuroncore_available,
is_torch_tensorrt_fx_available,
    is_torch_tf32_available,
is_torch_tpu_available,
is_torchaudio_available,
is_torchdistx_available,
is_torchdynamo_available,
is_torchvision_available,
is_training_run_on_sagemaker,
is_vision_available,
requires_backends,
torch_only_method,
)
_lowerCamelCase ="""pytorch_model.bin"""
_lowerCamelCase ="""pytorch_model.bin.index.json"""
_lowerCamelCase ="""adapter_config.json"""
_lowerCamelCase ="""adapter_model.bin"""
_lowerCamelCase ="""adapter_model.safetensors"""
_lowerCamelCase ="""tf_model.h5"""
_lowerCamelCase ="""tf_model.h5.index.json"""
_lowerCamelCase ="""model.ckpt"""
_lowerCamelCase ="""flax_model.msgpack"""
_lowerCamelCase ="""flax_model.msgpack.index.json"""
_lowerCamelCase ="""model.safetensors"""
_lowerCamelCase ="""model.safetensors.index.json"""
_lowerCamelCase ="""config.json"""
_lowerCamelCase ="""preprocessor_config.json"""
_lowerCamelCase =FEATURE_EXTRACTOR_NAME
_lowerCamelCase ="""generation_config.json"""
_lowerCamelCase ="""modelcard.json"""
_lowerCamelCase ="""▁"""
_lowerCamelCase =SENTENCEPIECE_UNDERLINE # Kept for backward compatibility
_lowerCamelCase =[
[[0, 1, 0, 1], [1, 0, 0, 1]]
] * 2 # Needs to have 0s and 1s only since XLM uses it for langs too.
_lowerCamelCase =[[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]]
_lowerCamelCase =[[1, 1, 1, 1, 1], [1, 1, 1, 0, 0], [0, 0, 0, 1, 1]]
def check_min_version(min_version):
    if version.parse(__version__) < version.parse(min_version):
        if "dev" in min_version:
            error_message = (
                "This example requires a source install from HuggingFace Transformers (see "
                "`https://huggingface.co/docs/transformers/installation#install-from-source`),"
            )
        else:
            error_message = f"This example requires a minimum version of {min_version},"
        error_message += f" but the version found is {__version__}.\n"
        raise ImportError(
            error_message
            + "Check out https://github.com/huggingface/transformers/tree/main/examples#important-note for the examples corresponding to other "
            "versions of HuggingFace Transformers."
        )
| 287 | 0 |
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
    import torch

    from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class OpenLlamaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return OpenLlamaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
            use_stable_embedding=True,
        )

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = OpenLlamaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = OpenLlamaModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        model = OpenLlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_decoder_model_past_large_inputs(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.is_decoder = True
        config.add_cross_attention = True
        model = OpenLlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            use_cache=True,
        )
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(
            next_input_ids,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_hidden_states=True,
        )["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class OpenLlamaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
    )
    all_generative_model_classes = (OpenLlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": OpenLlamaModel,
            "text-classification": OpenLlamaForSequenceClassification,
            "text-generation": OpenLlamaForCausalLM,
            "zero-shot": OpenLlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False

    def setUp(self):
        self.model_tester = OpenLlamaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenLlamaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_open_llama_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_open_llama_sequence_classification_model_for_single_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "single_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_open_llama_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size
        ).to(torch.float)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    @unittest.skip("Open-Llama buffers include complex numbers, which breaks this test")
    def test_save_load_fast_init_from_base(self):
        pass

    @parameterized.expand([("linear",), ("dynamic",)])
    def test_model_rope_scaling(self, scaling_type):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10], config.vocab_size)
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size)

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        original_model = OpenLlamaModel(config)
        original_model.to(torch_device)
        original_model.eval()
        original_short_output = original_model(short_input).last_hidden_state
        original_long_output = original_model(long_input).last_hidden_state

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {"type": scaling_type, "factor": 10.0}
        scaled_model = OpenLlamaModel(config)
        scaled_model.to(torch_device)
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input).last_hidden_state
        scaled_long_output = scaled_model(long_input).last_hidden_state

        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
        else:
            self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))

        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5))
| 88 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""camembert-base""": """https://huggingface.co/camembert-base/resolve/main/config.json""",
"""umberto-commoncrawl-cased-v1""": (
"""https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json"""
),
"""umberto-wikipedia-uncased-v1""": (
"""https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json"""
),
}
class CamembertConfig(PretrainedConfig):
    model_type = "camembert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class CamembertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
| 287 | 0 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DetrConfig, DetrForObjectDetection, DetrForSegmentation, DetrImageProcessor, ResNetConfig
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_detr_config(model_name):
    # initialize config
    if "resnet-50" in model_name:
        backbone_config = ResNetConfig.from_pretrained("microsoft/resnet-50")
    elif "resnet-101" in model_name:
        backbone_config = ResNetConfig.from_pretrained("microsoft/resnet-101")
    else:
        raise ValueError("Model name should include either resnet50 or resnet101")

    config = DetrConfig(use_timm_backbone=False, backbone_config=backbone_config)

    # set label attributes
    is_panoptic = "panoptic" in model_name
    if is_panoptic:
        config.num_labels = 250
    else:
        config.num_labels = 91
        repo_id = "huggingface/label-files"
        filename = "coco-detection-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    return config, is_panoptic
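# Illustration (labels invented): the label2id mapping built above is simply
# the inverse of id2label, e.g.
#     id2label = {0: "person", 1: "bicycle"}
#     {v: k for k, v in id2label.items()}  # {"person": 0, "bicycle": 1}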
def create_rename_keys(config):
    # here we list all keys to be renamed (original name on the left, our name on the right)
    rename_keys = []
# stem
# fmt: off
rename_keys.append(('backbone.0.body.conv1.weight', 'backbone.conv_encoder.model.embedder.embedder.convolution.weight') )
rename_keys.append(('backbone.0.body.bn1.weight', 'backbone.conv_encoder.model.embedder.embedder.normalization.weight') )
rename_keys.append(('backbone.0.body.bn1.bias', 'backbone.conv_encoder.model.embedder.embedder.normalization.bias') )
rename_keys.append(('backbone.0.body.bn1.running_mean', 'backbone.conv_encoder.model.embedder.embedder.normalization.running_mean') )
rename_keys.append(('backbone.0.body.bn1.running_var', 'backbone.conv_encoder.model.embedder.embedder.normalization.running_var') )
# stages
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
# shortcut
if layer_idx == 0:
rename_keys.append(
(
f"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.0.weight""",
f"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.convolution.weight""",
) )
rename_keys.append(
(
f"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.weight""",
f"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.weight""",
) )
rename_keys.append(
(
f"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.bias""",
f"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.bias""",
) )
rename_keys.append(
(
f"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_mean""",
f"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_mean""",
) )
rename_keys.append(
(
f"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_var""",
f"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_var""",
) )
# 3 convs
for i in range(3 ):
rename_keys.append(
(
f"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.conv{i+1}.weight""",
f"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.convolution.weight""",
) )
rename_keys.append(
(
f"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.weight""",
f"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.weight""",
) )
rename_keys.append(
(
f"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.bias""",
f"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.bias""",
) )
rename_keys.append(
(
f"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_mean""",
f"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_mean""",
) )
rename_keys.append(
(
f"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_var""",
f"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_var""",
) )
# fmt: on
for i in range(config.encoder_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(
f"""transformer.encoder.layers.{i}.self_attn.out_proj.weight""",
f"""encoder.layers.{i}.self_attn.out_proj.weight""",
) )
rename_keys.append(
(f"""transformer.encoder.layers.{i}.self_attn.out_proj.bias""", f"""encoder.layers.{i}.self_attn.out_proj.bias""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.linear1.weight""", f"""encoder.layers.{i}.fc1.weight""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.linear1.bias""", f"""encoder.layers.{i}.fc1.bias""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.linear2.weight""", f"""encoder.layers.{i}.fc2.weight""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.linear2.bias""", f"""encoder.layers.{i}.fc2.bias""") )
rename_keys.append(
(f"""transformer.encoder.layers.{i}.norm1.weight""", f"""encoder.layers.{i}.self_attn_layer_norm.weight""") )
rename_keys.append(
(f"""transformer.encoder.layers.{i}.norm1.bias""", f"""encoder.layers.{i}.self_attn_layer_norm.bias""") )
rename_keys.append(
(f"""transformer.encoder.layers.{i}.norm2.weight""", f"""encoder.layers.{i}.final_layer_norm.weight""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.norm2.bias""", f"""encoder.layers.{i}.final_layer_norm.bias""") )
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(
f"""transformer.decoder.layers.{i}.self_attn.out_proj.weight""",
f"""decoder.layers.{i}.self_attn.out_proj.weight""",
) )
rename_keys.append(
(f"""transformer.decoder.layers.{i}.self_attn.out_proj.bias""", f"""decoder.layers.{i}.self_attn.out_proj.bias""") )
rename_keys.append(
(
f"""transformer.decoder.layers.{i}.multihead_attn.out_proj.weight""",
f"""decoder.layers.{i}.encoder_attn.out_proj.weight""",
) )
rename_keys.append(
(
f"""transformer.decoder.layers.{i}.multihead_attn.out_proj.bias""",
f"""decoder.layers.{i}.encoder_attn.out_proj.bias""",
) )
rename_keys.append((f"""transformer.decoder.layers.{i}.linear1.weight""", f"""decoder.layers.{i}.fc1.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.linear1.bias""", f"""decoder.layers.{i}.fc1.bias""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.linear2.weight""", f"""decoder.layers.{i}.fc2.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.linear2.bias""", f"""decoder.layers.{i}.fc2.bias""") )
rename_keys.append(
(f"""transformer.decoder.layers.{i}.norm1.weight""", f"""decoder.layers.{i}.self_attn_layer_norm.weight""") )
rename_keys.append(
(f"""transformer.decoder.layers.{i}.norm1.bias""", f"""decoder.layers.{i}.self_attn_layer_norm.bias""") )
rename_keys.append(
(f"""transformer.decoder.layers.{i}.norm2.weight""", f"""decoder.layers.{i}.encoder_attn_layer_norm.weight""") )
rename_keys.append(
(f"""transformer.decoder.layers.{i}.norm2.bias""", f"""decoder.layers.{i}.encoder_attn_layer_norm.bias""") )
rename_keys.append(
(f"""transformer.decoder.layers.{i}.norm3.weight""", f"""decoder.layers.{i}.final_layer_norm.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.norm3.bias""", f"""decoder.layers.{i}.final_layer_norm.bias""") )
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
('input_proj.weight', 'input_projection.weight'),
('input_proj.bias', 'input_projection.bias'),
('query_embed.weight', 'query_position_embeddings.weight'),
('transformer.decoder.norm.weight', 'decoder.layernorm.weight'),
('transformer.decoder.norm.bias', 'decoder.layernorm.bias'),
('class_embed.weight', 'class_labels_classifier.weight'),
('class_embed.bias', 'class_labels_classifier.bias'),
('bbox_embed.layers.0.weight', 'bbox_predictor.layers.0.weight'),
('bbox_embed.layers.0.bias', 'bbox_predictor.layers.0.bias'),
('bbox_embed.layers.1.weight', 'bbox_predictor.layers.1.weight'),
('bbox_embed.layers.1.bias', 'bbox_predictor.layers.1.bias'),
('bbox_embed.layers.2.weight', 'bbox_predictor.layers.2.weight'),
('bbox_embed.layers.2.bias', 'bbox_predictor.layers.2.bias'),
] )
return rename_keys
def rename_key(state_dict, old, new):
    val = state_dict.pop(old)
    state_dict[new] = val
def read_in_q_k_v(state_dict, is_panoptic=False):
    prefix = ""
    if is_panoptic:
        prefix = "detr."

    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
        # read in weights + bias of input projection layer of cross-attention
        in_proj_weight_cross_attn = state_dict.pop(
            f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight"
        )
        in_proj_bias_cross_attn = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.weight"] = in_proj_weight_cross_attn[:256, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.bias"] = in_proj_bias_cross_attn[:256]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.weight"] = in_proj_weight_cross_attn[256:512, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.bias"] = in_proj_bias_cross_attn[256:512]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.weight"] = in_proj_weight_cross_attn[-256:, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.bias"] = in_proj_bias_cross_attn[-256:]
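# Illustration only (not part of the conversion script; the 256 embedding size
# mirrors the slices above): how a fused `in_proj` matrix decomposes into the
# separate Q, K and V blocks.
def _in_proj_split_sketch(embed_dim=256):
    in_proj_weight = torch.randn(3 * embed_dim, embed_dim)  # fused [q; k; v]
    q_w = in_proj_weight[:embed_dim, :]
    k_w = in_proj_weight[embed_dim : 2 * embed_dim, :]
    v_w = in_proj_weight[-embed_dim:, :]
    assert q_w.shape == k_w.shape == v_w.shape == (embed_dim, embed_dim)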
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)

    return im
@torch.no_grad()
def convert_detr_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    """Copy/paste/tweak the original DETR weights into our DETR structure."""
    config, is_panoptic = get_detr_config(model_name)

    # load original model from torch hub
    model_name_to_original_name = {
        "detr-resnet-50": "detr_resnet50",
        "detr-resnet-101": "detr_resnet101",
    }
    logger.info(f"Converting model {model_name}...")
    detr = torch.hub.load("facebookresearch/detr", model_name_to_original_name[model_name], pretrained=True).eval()
    state_dict = detr.state_dict()
    # rename keys
    for src, dest in create_rename_keys(config):
        if is_panoptic:
            src = "detr." + src
        rename_key(state_dict, src, dest)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict, is_panoptic=is_panoptic)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "detr.model." if is_panoptic else "model."
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith("detr")
                and not key.startswith("class_labels_classifier")
                and not key.startswith("bbox_predictor")
            ):
                val = state_dict.pop(key)
                state_dict["detr.model" + key[4:]] = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                val = state_dict.pop(key)
                state_dict["detr." + key] = val
            elif key.startswith("bbox_attention") or key.startswith("mask_head"):
                continue
            else:
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
        else:
            if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
    # finally, create HuggingFace model and load state dict
    model = DetrForSegmentation(config) if is_panoptic else DetrForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    # verify our conversion on an image
    format = "coco_panoptic" if is_panoptic else "coco_detection"
    processor = DetrImageProcessor(format=format)

    encoding = processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]

    original_outputs = detr(pixel_values)
    outputs = model(pixel_values)

    assert torch.allclose(outputs.logits, original_outputs["pred_logits"], atol=1e-3)
    assert torch.allclose(outputs.pred_boxes, original_outputs["pred_boxes"], atol=1e-3)
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks, original_outputs["pred_masks"], atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Upload model and image processor to the hub
        logger.info("Uploading PyTorch model and image processor to the hub...")
        model.push_to_hub(f"nielsr/{model_name}")
        processor.push_to_hub(f"nielsr/{model_name}")
if __name__ == "__main__":
__lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument(
'''--model_name''',
default='''detr-resnet-50''',
type=str,
choices=['''detr-resnet-50''', '''detr-resnet-101'''],
help='''Name of the DETR model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Whether to push the model to the hub or not.''')
__lowerCAmelCase = parser.parse_args()
convert_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 89 |
ENERGY_CONVERSION = {
    "joule": 1.0,
    "kilojoule": 1_000,
    "megajoule": 1_000_000,
    "gigajoule": 1_000_000_000,
    "wattsecond": 1.0,
    "watthour": 3_600,
    "kilowatthour": 3_600_000,
    "newtonmeter": 1.0,
    "calorie_nutr": 4_186.8,
    "kilocalorie_nutr": 4_186_800.00,
    "electronvolt": 1.602176634e-19,
    "britishthermalunit_it": 1_055.05585,
    "footpound": 1.355818,
}
def energy_conversion(from_type: str, to_type: str, value: float) -> float:
    """
    Conversion of energy units.

    >>> energy_conversion("joule", "joule", 1)
    1.0
    >>> energy_conversion("joule", "kilojoule", 1)
    0.001
    >>> energy_conversion("kilowatthour", "joule", 1)
    3600000.0
    """
    if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
        msg = (
            f"Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"
            f"Valid values are: {', '.join(ENERGY_CONVERSION)}"
        )
        raise ValueError(msg)
    return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]
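# Worked example (follows directly from the table above): 1 kilowatt-hour is
# 3_600_000 J and 1 megajoule is 1_000_000 J, so
# energy_conversion("kilowatthour", "megajoule", 1) == 3.6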
if __name__ == "__main__":
import doctest
doctest.testmod()
| 287 | 0 |
from itertools import permutations
def is_substring_divisible(num: tuple) -> bool:
    """Check whether the pandigital tuple `num` has the substring-divisibility property."""
    if num[3] % 2 != 0:
        return False

    if (num[2] + num[3] + num[4]) % 3 != 0:
        return False

    if num[5] % 5 != 0:
        return False

    tests = [7, 11, 13, 17]
    for i, test in enumerate(tests):
        if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
            return False
    return True


def solution(n: int = 10) -> int:
    """Sum all 0 to 9 pandigital numbers with the substring-divisibility property."""
    return sum(
        int("".join(map(str, num)))
        for num in permutations(range(n))
        if is_substring_divisible(num)
    )
if __name__ == "__main__":
print(f'''{solution() = }''')
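    # Sanity check (illustrative, not part of the original solution):
    # 1406357289 is the substring-divisible pandigital quoted in the Project
    # Euler 43 problem statement, so it must pass the predicate above.
    assert is_substring_divisible((1, 4, 0, 6, 3, 5, 7, 2, 8, 9))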
| 90 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class A__ ( TaskTemplate):
    task: str = field(default="""automatic-speech-recognition""" , metadata={"""include_in_asdict_even_if_is_default""": True})
    input_schema: ClassVar[Features] = Features({"""audio""": Audio()})
    label_schema: ClassVar[Features] = Features({"""transcription""": Value("""string""")})
    audio_column: str = "audio"
    transcription_column: str = "transcription"
    def align_with_features( self , features ):
        if self.audio_column not in features:
            raise ValueError(F'''Column {self.audio_column} is not present in features.''' )
        if not isinstance(features[self.audio_column] , Audio ):
            raise ValueError(F'''Column {self.audio_column} is not an Audio type.''' )
        task_template = copy.deepcopy(self )
        input_schema = self.input_schema.copy()
        input_schema["""audio"""] = features[self.audio_column]
        task_template.__dict__["""input_schema"""] = input_schema
        return task_template
    @property
    def column_mapping( self ):
        return {self.audio_column: "audio", self.transcription_column: "transcription"}
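# Usage sketch (added; the Features mapping is illustrative, not from the
# original file): aligning the template with a concrete dataset schema swaps in
# that dataset's Audio feature.
example_features = Features({"audio": Audio(), "transcription": Value("string")})
example_template = A__().align_with_features(example_features)
assert example_template.input_schema["audio"] == example_features["audio"]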
| 287 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {"""configuration_glpn""": ["""GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GLPNConfig"""]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""feature_extraction_glpn"""] = ["""GLPNFeatureExtractor"""]
    _import_structure["""image_processing_glpn"""] = ["""GLPNImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_glpn"""] = [
"""GLPN_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GLPNForDepthEstimation""",
"""GLPNLayer""",
"""GLPNModel""",
"""GLPNPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_glpn import GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP, GLPNConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_glpn import GLPNFeatureExtractor
from .image_processing_glpn import GLPNImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_glpn import (
GLPN_PRETRAINED_MODEL_ARCHIVE_LIST,
GLPNForDepthEstimation,
GLPNLayer,
GLPNModel,
GLPNPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
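# Lazy-import sketch (added for illustration only -- a simplified stand-in for
# _LazyModule, not transformers' real implementation): PEP 562 lets a plain
# module defer submodule imports with a module-level __getattr__ driven by the
# same _import_structure mapping used above.
import importlib

def __getattr__(name):
    for submodule, exported_names in _import_structure.items():
        if name in exported_names:
            # Import the submodule on first attribute access only.
            module = importlib.import_module("." + submodule, __name__)
            return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")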
| 91 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCamelCase =logging.get_logger(__name__)
_lowerCamelCase ={
"""BridgeTower/bridgetower-base""": """https://huggingface.co/BridgeTower/bridgetower-base/blob/main/config.json""",
"""BridgeTower/bridgetower-base-itm-mlm""": (
"""https://huggingface.co/BridgeTower/bridgetower-base-itm-mlm/blob/main/config.json"""
),
}
class BridgeTowerVisionConfig( PretrainedConfig):
    model_type = """bridgetower_vision_model"""
    def __init__( self , hidden_size=7_6_8 , num_hidden_layers=1_2 , num_channels=3 , patch_size=1_6 , image_size=2_8_8 , initializer_factor=1 , layer_norm_eps=1e-05 , stop_gradient=False , share_layernorm=True , remove_last_layer=False , **kwargs , ):
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.stop_gradient = stop_gradient
        self.share_layernorm = share_layernorm
        self.remove_last_layer = remove_last_layer
    @classmethod
    def from_pretrained( cls , pretrained_model_name_or_path , **kwargs ):
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        if config_dict.get("""model_type""" ) == "bridgetower":
            config_dict = config_dict["""text_config"""]
        if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
                F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
        return cls.from_dict(config_dict , **kwargs )
class BridgeTowerTextConfig( PretrainedConfig):
    model_type = """bridgetower_text_model"""
    def __init__( self , vocab_size=5_0_2_6_5 , hidden_size=7_6_8 , num_hidden_layers=1_2 , num_attention_heads=1_2 , initializer_factor=1 , intermediate_size=3_0_7_2 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_1_4 , type_vocab_size=1 , layer_norm_eps=1e-05 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , position_embedding_type="absolute" , use_cache=True , **kwargs , ):
        super().__init__(**kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.initializer_factor = initializer_factor
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
    @classmethod
    def from_pretrained( cls , pretrained_model_name_or_path , **kwargs ):
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        if config_dict.get("""model_type""" ) == "bridgetower":
            config_dict = config_dict["""text_config"""]
        if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
                F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
        return cls.from_dict(config_dict , **kwargs )
class BridgeTowerConfig( PretrainedConfig):
    model_type = """bridgetower"""
    def __init__( self , share_cross_modal_transformer_layers=True , hidden_act="gelu" , hidden_size=7_6_8 , initializer_factor=1 , layer_norm_eps=1e-05 , share_link_tower_layers=False , link_tower_type="add" , num_attention_heads=1_2 , num_hidden_layers=6 , tie_word_embeddings=False , init_layernorm_from_vision_encoder=False , text_config=None , vision_config=None , **kwargs , ):
        # TODO: remove this once the Hub files are updated.
        _ = kwargs.pop("""text_config_dict""" , None )
        _ = kwargs.pop("""vision_config_dict""" , None )
        super().__init__(**kwargs )
        self.share_cross_modal_transformer_layers = share_cross_modal_transformer_layers
        self.hidden_act = hidden_act
        self.hidden_size = hidden_size
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.share_link_tower_layers = share_link_tower_layers
        self.link_tower_type = link_tower_type
        self.num_attention_heads = num_attention_heads
        self.num_hidden_layers = num_hidden_layers
        self.tie_word_embeddings = tie_word_embeddings
        self.init_layernorm_from_vision_encoder = init_layernorm_from_vision_encoder
        if text_config is None:
            text_config = {}
            logger.info("""`text_config` is `None`. Initializing the `BridgeTowerTextConfig` with default values.""" )
        if vision_config is None:
            vision_config = {}
            logger.info("""`vision_config` is `None`. Initializing the `BridgeTowerVisionConfig` with default values.""" )
        self.text_config = BridgeTowerTextConfig(**text_config )
        self.vision_config = BridgeTowerVisionConfig(**vision_config )
    @classmethod
    def from_text_vision_configs( cls , text_config , vision_config , **kwargs ):
        return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **kwargs )
    def to_dict( self ):
        output = copy.deepcopy(self.__dict__ )
        output["""text_config"""] = self.text_config.to_dict()
        output["""vision_config"""] = self.vision_config.to_dict()
        output["""model_type"""] = self.__class__.model_type
        return output
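# Usage sketch (added): composing the joint config from the two sub-configs via
# the classmethod above; all values below are the defaults restored in this file.
example_text = BridgeTowerTextConfig()
example_vision = BridgeTowerVisionConfig()
example_config = BridgeTowerConfig.from_text_vision_configs(example_text, example_vision)
assert example_config.to_dict()["model_type"] == "bridgetower"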
| 287 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase__ = logging.get_logger(__name__)
UpperCamelCase__ = {"""openai-gpt""": """https://huggingface.co/openai-gpt/resolve/main/config.json"""}
class OpenAIGPTConfig( PretrainedConfig ):
    model_type = """openai-gpt"""
    attribute_map = {
        """max_position_embeddings""": """n_positions""",
        """hidden_size""": """n_embd""",
        """num_attention_heads""": """n_head""",
        """num_hidden_layers""": """n_layer""",
    }
    def __init__( self , vocab_size=4_0_4_7_8 , n_positions=5_1_2 , n_embd=7_6_8 , n_layer=1_2 , n_head=1_2 , afn="gelu" , resid_pdrop=0.1 , embd_pdrop=0.1 , attn_pdrop=0.1 , layer_norm_epsilon=1E-5 , initializer_range=0.02 , summary_type="cls_index" , summary_use_proj=True , summary_activation=None , summary_proj_to_labels=True , summary_first_dropout=0.1 , **kwargs , ):
        """simple docstring"""
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.afn = afn
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_first_dropout = summary_first_dropout
        self.summary_proj_to_labels = summary_proj_to_labels
        super().__init__(**kwargs )
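# Usage sketch (added): attribute_map lets the generic name resolve to the
# GPT-specific one, so hidden_size reads back n_embd.
example_config = OpenAIGPTConfig(n_embd=5_1_2, n_layer=6)
assert example_config.hidden_size == 5_1_2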
| 92 |
def solution( limit = 100_0000 ):
    primes = set(range(3, limit, 2 ) )
    primes.add(2 )
    for p in range(3, limit, 2 ):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, limit, p ) ) )
    phi = [float(n ) for n in range(limit + 1 )]
    for p in primes:
        for n in range(p, limit + 1, p ):
            phi[n] *= 1 - 1 / p
    return int(sum(phi[2:] ) )
if __name__ == "__main__":
print(f'''{solution() = }''')
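# Worked check (added): for limit 8 the count of reduced proper fractions is
# phi(2)+...+phi(8) = 1+2+2+4+2+6+4 = 21, which the sieve reproduces.
assert solution(8) == 21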
| 287 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowercase : List[Any] = logging.get_logger(__name__)
_lowercase : str = {
"SCUT-DLVCLab/lilt-roberta-en-base": (
"https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json"
),
}
class LiltConfig( PretrainedConfig ):
    model_type = '''lilt'''
    def __init__( self , vocab_size=3_05_22 , hidden_size=7_68 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=30_72 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_12 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1E-1_2 , pad_token_id=0 , position_embedding_type="absolute" , classifier_dropout=None , channel_shrink_ratio=4 , max_2d_position_embeddings=10_24 , **kwargs , ):
        """simple docstring"""
        super().__init__(pad_token_id=pad_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        self.channel_shrink_ratio = channel_shrink_ratio
        self.max_2d_position_embeddings = max_2d_position_embeddings
| 93 |
from __future__ import annotations
import collections
import pprint
from pathlib import Path
def signature( word ):
    return "".join(sorted(word ) )
def anagram( my_word ):
    return word_by_signature[signature(my_word )]
_lowerCamelCase =Path(__file__).parent.joinpath("""words.txt""").read_text(encoding="""utf-8""")
_lowerCamelCase =sorted({word.strip().lower() for word in data.splitlines()})
_lowerCamelCase =collections.defaultdict(list)
for word in word_list:
word_by_signature[signature(word)].append(word)
if __name__ == "__main__":
_lowerCamelCase ={word: anagram(word) for word in word_list if len(anagram(word)) > 1}
with open("""anagrams.txt""", """w""") as file:
file.write("""all_anagrams = \n """)
file.write(pprint.pformat(all_anagrams))
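# Illustration (added): a signature is just the word's letters in sorted order,
# so two words are anagrams exactly when their signatures match.
assert signature("dormitory") == signature("dirtyroom")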
| 287 | 0 |
import numpy as np
snake_case : List[str] = [
['''a''', '''b''', '''c''', '''d''', '''e'''],
['''f''', '''g''', '''h''', '''i''', '''k'''],
['''l''', '''m''', '''n''', '''o''', '''p'''],
['''q''', '''r''', '''s''', '''t''', '''u'''],
['''v''', '''w''', '''x''', '''y''', '''z'''],
]
class _snake_case :
    def __init__( self ):
        self.SQUARE = np.array(snake_case )
    def letter_to_numbers( self , letter ):
        # Locate the letter in the square; returns its 1-based (row, column).
        index1 , index2 = np.where(letter == self.SQUARE )
        indexes = np.concatenate([index1 + 1, index2 + 1] )
        return indexes
    def numbers_to_letter( self , index1 , index2 ):
        letter = self.SQUARE[index1 - 1, index2 - 1]
        return letter
    def encode( self , message ):
        message = message.lower()
        message = message.replace(''' ''' , '''''' )
        message = message.replace('''j''' , '''i''' )
        first_step = np.empty((2, len(message )) )
        for letter_index in range(len(message ) ):
            numbers = self.letter_to_numbers(message[letter_index] )
            first_step[0, letter_index] = numbers[0]
            first_step[1, letter_index] = numbers[1]
        second_step = first_step.reshape(2 * len(message ) )
        encoded_message = ''''''
        for numbers_index in range(len(message ) ):
            index1 = int(second_step[numbers_index * 2] )
            index2 = int(second_step[(numbers_index * 2) + 1] )
            letter = self.numbers_to_letter(index1 , index2 )
            encoded_message = encoded_message + letter
        return encoded_message
    def decode( self , message ):
        message = message.lower()
        message = message.replace(''' ''' , '''''' )
        first_step = np.empty(2 * len(message ) )
        for letter_index in range(len(message ) ):
            numbers = self.letter_to_numbers(message[letter_index] )
            first_step[letter_index * 2] = numbers[0]
            first_step[letter_index * 2 + 1] = numbers[1]
        second_step = first_step.reshape((2, len(message )) )
        decoded_message = ''''''
        for numbers_index in range(len(message ) ):
            index1 = int(second_step[0, numbers_index] )
            index2 = int(second_step[1, numbers_index] )
            letter = self.numbers_to_letter(index1 , index2 )
            decoded_message = decoded_message + letter
        return decoded_message
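# Usage sketch (added): round-tripping a message through the cipher. The
# classical Polybius square merges i/j, so any "j" in the plaintext comes back
# as "i", and spaces are stripped before encoding.
example_cipher = _snake_case()
assert example_cipher.decode(example_cipher.encode("test message")) == "testmessage"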
| 94 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
_lowerCamelCase =logging.get_logger(__name__)
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : Union[str, Any] = ["""pixel_values"""]
def __init__( self , __magic_name__ = True , __magic_name__ = None , __magic_name__ = None , __magic_name__ = PILImageResampling.BILINEAR , __magic_name__ = True , __magic_name__ = 1 / 2_5_5 , __magic_name__ = True , __magic_name__ = None , __magic_name__ = None , **__magic_name__ , ):
super().__init__(**__magic_name__ )
lowerCamelCase : Dict = size if size is not None else {"""shortest_edge""": 3_8_4}
lowerCamelCase : Tuple = get_size_dict(__magic_name__ , default_to_square=__magic_name__ )
lowerCamelCase : Dict = do_resize
lowerCamelCase : List[Any] = size
# Default value set here for backwards compatibility where the value in config is None
lowerCamelCase : Any = crop_pct if crop_pct is not None else 2_2_4 / 2_5_6
lowerCamelCase : Union[str, Any] = resample
lowerCamelCase : str = do_rescale
lowerCamelCase : Union[str, Any] = rescale_factor
lowerCamelCase : Tuple = do_normalize
lowerCamelCase : Optional[Any] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
lowerCamelCase : int = image_std if image_std is not None else IMAGENET_STANDARD_STD
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ = PILImageResampling.BICUBIC , __magic_name__ = None , **__magic_name__ , ):
lowerCamelCase : Union[str, Any] = get_size_dict(__magic_name__ , default_to_square=__magic_name__ )
if "shortest_edge" not in size:
raise ValueError(F'''Size dictionary must contain \'shortest_edge\' key. Got {size.keys()}''' )
lowerCamelCase : str = size["""shortest_edge"""]
if shortest_edge < 3_8_4:
# maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
lowerCamelCase : List[str] = int(shortest_edge / crop_pct )
lowerCamelCase : Optional[Any] = get_resize_output_image_size(__magic_name__ , size=__magic_name__ , default_to_square=__magic_name__ )
lowerCamelCase : Optional[int] = resize(image=__magic_name__ , size=__magic_name__ , resample=__magic_name__ , data_format=__magic_name__ , **__magic_name__ )
# then crop to (shortest_edge, shortest_edge)
return center_crop(image=__magic_name__ , size=(shortest_edge, shortest_edge) , data_format=__magic_name__ , **__magic_name__ )
else:
# warping (no cropping) when evaluated at 384 or larger
return resize(
__magic_name__ , size=(shortest_edge, shortest_edge) , resample=__magic_name__ , data_format=__magic_name__ , **__magic_name__ )
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ = None , **__magic_name__ , ):
return rescale(__magic_name__ , scale=__magic_name__ , data_format=__magic_name__ , **__magic_name__ )
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ = None , **__magic_name__ , ):
return normalize(__magic_name__ , mean=__magic_name__ , std=__magic_name__ , data_format=__magic_name__ , **__magic_name__ )
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ = None , __magic_name__ = None , __magic_name__ = None , __magic_name__ = None , __magic_name__ = None , __magic_name__ = None , __magic_name__ = None , __magic_name__ = None , __magic_name__ = None , __magic_name__ = None , __magic_name__ = ChannelDimension.FIRST , **__magic_name__ , ):
lowerCamelCase : str = do_resize if do_resize is not None else self.do_resize
lowerCamelCase : Optional[Any] = crop_pct if crop_pct is not None else self.crop_pct
lowerCamelCase : Optional[int] = resample if resample is not None else self.resample
lowerCamelCase : Optional[int] = do_rescale if do_rescale is not None else self.do_rescale
lowerCamelCase : List[str] = rescale_factor if rescale_factor is not None else self.rescale_factor
lowerCamelCase : Tuple = do_normalize if do_normalize is not None else self.do_normalize
lowerCamelCase : Optional[Any] = image_mean if image_mean is not None else self.image_mean
lowerCamelCase : Tuple = image_std if image_std is not None else self.image_std
lowerCamelCase : Dict = size if size is not None else self.size
lowerCamelCase : str = get_size_dict(__magic_name__ , default_to_square=__magic_name__ )
lowerCamelCase : List[str] = make_list_of_images(__magic_name__ )
if not valid_images(__magic_name__ ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None or resample is None:
raise ValueError("""Size and resample must be specified if do_resize is True.""" )
if do_resize and size["shortest_edge"] < 3_8_4 and crop_pct is None:
raise ValueError("""crop_pct must be specified if size < 384.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# All transformations expect numpy arrays.
lowerCamelCase : Optional[Any] = [to_numpy_array(__magic_name__ ) for image in images]
if do_resize:
lowerCamelCase : List[Any] = [self.resize(image=__magic_name__ , size=__magic_name__ , crop_pct=__magic_name__ , resample=__magic_name__ ) for image in images]
if do_rescale:
lowerCamelCase : Union[str, Any] = [self.rescale(image=__magic_name__ , scale=__magic_name__ ) for image in images]
if do_normalize:
lowerCamelCase : List[Any] = [self.normalize(image=__magic_name__ , mean=__magic_name__ , std=__magic_name__ ) for image in images]
lowerCamelCase : Optional[int] = [to_channel_dimension_format(__magic_name__ , __magic_name__ ) for image in images]
lowerCamelCase : List[str] = {"""pixel_values""": images}
return BatchFeature(data=__magic_name__ , tensor_type=__magic_name__ )
| 287 | 0 |
def sum_of_series( first_term: int , common_diff: int , num_of_terms: int ):
    """simple docstring"""
    total = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)
    # formula for sum of an arithmetic series
    return total
def main( ):
    """simple docstring"""
    print(sum_of_series(1 , 1 , 10 ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
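# Worked example (added): n/2 * (2a + (n-1)d) with a=1, d=1, n=10 gives
# 5 * (2 + 9) = 55, the 10th triangular number.
assert sum_of_series(1, 1, 10) == 55.0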
| 95 |
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPTaConfig
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
_lowerCamelCase ={
"""return_dict""": False,
"""output_hidden_states""": True,
"""output_attentions""": True,
"""torchscript""": True,
"""torch_dtype""": """float16""",
"""use_bfloat16""": True,
"""tf_legacy_loss""": True,
"""pruned_heads""": {"""a""": 1},
"""tie_word_embeddings""": False,
"""is_decoder""": True,
"""cross_attention_hidden_size""": 1_2_8,
"""add_cross_attention""": True,
"""tie_encoder_decoder""": True,
"""max_length""": 5_0,
"""min_length""": 3,
"""do_sample""": True,
"""early_stopping""": True,
"""num_beams""": 3,
"""num_beam_groups""": 3,
"""diversity_penalty""": 0.5,
"""temperature""": 2.0,
"""top_k""": 1_0,
"""top_p""": 0.7,
"""typical_p""": 0.2,
"""repetition_penalty""": 0.8,
"""length_penalty""": 0.8,
"""no_repeat_ngram_size""": 5,
"""encoder_no_repeat_ngram_size""": 5,
"""bad_words_ids""": [1, 2, 3],
"""num_return_sequences""": 3,
"""chunk_size_feed_forward""": 5,
"""output_scores""": True,
"""return_dict_in_generate""": True,
"""forced_bos_token_id""": 2,
"""forced_eos_token_id""": 3,
"""remove_invalid_values""": True,
"""architectures""": ["""BertModel"""],
"""finetuning_task""": """translation""",
"""id2label""": {0: """label"""},
"""label2id""": {"""label""": """0"""},
"""tokenizer_class""": """BertTokenizerFast""",
"""prefix""": """prefix""",
"""bos_token_id""": 6,
"""pad_token_id""": 7,
"""eos_token_id""": 8,
"""sep_token_id""": 9,
"""decoder_start_token_id""": 1_0,
"""exponential_decay_length_penalty""": (5, 1.01),
"""suppress_tokens""": [0, 1],
"""begin_suppress_tokens""": 2,
"""task_specific_params""": {"""translation""": """some_params"""},
"""problem_type""": """regression""",
}
@is_staging_test
class A__ ( unittest.TestCase):
@classmethod
def UpperCamelCase__ ( cls ):
lowerCamelCase : int = TOKEN
HfFolder.save_token(__magic_name__ )
@classmethod
def UpperCamelCase__ ( cls ):
try:
delete_repo(token=cls._token , repo_id="""test-config""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""valid_org/test-config-org""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""test-dynamic-config""" )
except HTTPError:
pass
def UpperCamelCase__ ( self ):
lowerCamelCase : List[Any] = BertConfig(
vocab_size=9_9 , hidden_size=3_2 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=3_7 )
config.push_to_hub("""test-config""" , use_auth_token=self._token )
lowerCamelCase : Any = BertConfig.from_pretrained(F'''{USER}/test-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__magic_name__ , getattr(__magic_name__ , __magic_name__ ) )
# Reset repo
delete_repo(token=self._token , repo_id="""test-config""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(__magic_name__ , repo_id="""test-config""" , push_to_hub=__magic_name__ , use_auth_token=self._token )
lowerCamelCase : Optional[Any] = BertConfig.from_pretrained(F'''{USER}/test-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__magic_name__ , getattr(__magic_name__ , __magic_name__ ) )
def UpperCamelCase__ ( self ):
lowerCamelCase : Dict = BertConfig(
vocab_size=9_9 , hidden_size=3_2 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=3_7 )
config.push_to_hub("""valid_org/test-config-org""" , use_auth_token=self._token )
lowerCamelCase : Optional[int] = BertConfig.from_pretrained("""valid_org/test-config-org""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__magic_name__ , getattr(__magic_name__ , __magic_name__ ) )
# Reset repo
delete_repo(token=self._token , repo_id="""valid_org/test-config-org""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
__magic_name__ , repo_id="""valid_org/test-config-org""" , push_to_hub=__magic_name__ , use_auth_token=self._token )
lowerCamelCase : List[str] = BertConfig.from_pretrained("""valid_org/test-config-org""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__magic_name__ , getattr(__magic_name__ , __magic_name__ ) )
def UpperCamelCase__ ( self ):
CustomConfig.register_for_auto_class()
lowerCamelCase : Optional[Any] = CustomConfig(attribute=4_2 )
config.push_to_hub("""test-dynamic-config""" , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(config.auto_map , {"""AutoConfig""": """custom_configuration.CustomConfig"""} )
lowerCamelCase : List[str] = AutoConfig.from_pretrained(F'''{USER}/test-dynamic-config''' , trust_remote_code=__magic_name__ )
# Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
self.assertEqual(new_config.__class__.__name__ , """CustomConfig""" )
self.assertEqual(new_config.attribute , 4_2 )
class A__ ( unittest.TestCase):
def UpperCamelCase__ ( self ):
lowerCamelCase : str = GPTaConfig()
# attempt to modify each of int/float/bool/str config records and verify they were updated
lowerCamelCase : Optional[int] = c.n_embd + 1 # int
lowerCamelCase : Optional[int] = c.resid_pdrop + 1.0 # float
lowerCamelCase : Tuple = not c.scale_attn_weights # bool
lowerCamelCase : Any = c.summary_type + """foo""" # str
c.update_from_string(
F'''n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}''' )
self.assertEqual(__magic_name__ , c.n_embd , """mismatch for key: n_embd""" )
self.assertEqual(__magic_name__ , c.resid_pdrop , """mismatch for key: resid_pdrop""" )
self.assertEqual(__magic_name__ , c.scale_attn_weights , """mismatch for key: scale_attn_weights""" )
self.assertEqual(__magic_name__ , c.summary_type , """mismatch for key: summary_type""" )
def UpperCamelCase__ ( self ):
lowerCamelCase : str = PretrainedConfig()
lowerCamelCase : int = [key for key in base_config.__dict__ if key not in config_common_kwargs]
# If this part of the test fails, you have arguments to addin config_common_kwargs above.
self.assertListEqual(
__magic_name__ , ["""is_encoder_decoder""", """_name_or_path""", """_commit_hash""", """transformers_version"""] )
lowerCamelCase : List[str] = [key for key, value in config_common_kwargs.items() if value == getattr(__magic_name__ , __magic_name__ )]
if len(__magic_name__ ) > 0:
raise ValueError(
"""The following keys are set with the default values in"""
""" `test_configuration_common.config_common_kwargs` pick another value for them:"""
F''' {", ".join(__magic_name__ )}.''' )
def UpperCamelCase__ ( self ):
with self.assertRaises(__magic_name__ ):
# config is in subfolder, the following should not work without specifying the subfolder
lowerCamelCase : Dict = BertConfig.from_pretrained("""hf-internal-testing/tiny-random-bert-subfolder""" )
lowerCamelCase : str = BertConfig.from_pretrained("""hf-internal-testing/tiny-random-bert-subfolder""" , subfolder="""bert""" )
self.assertIsNotNone(__magic_name__ )
def UpperCamelCase__ ( self ):
# A mock response for an HTTP head request to emulate server down
lowerCamelCase : Dict = mock.Mock()
lowerCamelCase : Optional[int] = 5_0_0
lowerCamelCase : List[Any] = {}
lowerCamelCase : Tuple = HTTPError
lowerCamelCase : Union[str, Any] = {}
# Download this model to make sure it's in the cache.
lowerCamelCase : List[str] = BertConfig.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch("""requests.Session.request""" , return_value=__magic_name__ ) as mock_head:
lowerCamelCase : Any = BertConfig.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
# This check we did call the fake head request
mock_head.assert_called()
def UpperCamelCase__ ( self ):
# This test is for deprecated behavior and can be removed in v5
lowerCamelCase : List[str] = BertConfig.from_pretrained(
"""https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json""" )
def UpperCamelCase__ ( self ):
lowerCamelCase : Optional[int] = AutoConfig.from_pretrained("""bert-base-cased""" )
lowerCamelCase : Optional[Any] = ["""config.4.0.0.json"""]
with tempfile.TemporaryDirectory() as tmp_dir:
configuration.save_pretrained(__magic_name__ )
lowerCamelCase : str = 2
json.dump(configuration.to_dict() , open(os.path.join(__magic_name__ , """config.4.0.0.json""" ) , """w""" ) )
# This should pick the new configuration file as the version of Transformers is > 4.0.0
lowerCamelCase : Union[str, Any] = AutoConfig.from_pretrained(__magic_name__ )
self.assertEqual(new_configuration.hidden_size , 2 )
# Will need to be adjusted if we reach v42 and this test is still here.
# Should pick the old configuration file as the version of Transformers is < 4.42.0
lowerCamelCase : Any = ["""config.42.0.0.json"""]
lowerCamelCase : Optional[Any] = 7_6_8
configuration.save_pretrained(__magic_name__ )
shutil.move(os.path.join(__magic_name__ , """config.4.0.0.json""" ) , os.path.join(__magic_name__ , """config.42.0.0.json""" ) )
lowerCamelCase : int = AutoConfig.from_pretrained(__magic_name__ )
self.assertEqual(new_configuration.hidden_size , 7_6_8 )
def UpperCamelCase__ ( self ):
# This repo has two configuration files, one for v4.0.0 and above with a different hidden size.
lowerCamelCase : str = """hf-internal-testing/test-two-configs"""
import transformers as new_transformers
lowerCamelCase : Tuple = """v4.0.0"""
lowerCamelCase , lowerCamelCase : Optional[int] = new_transformers.models.auto.AutoConfig.from_pretrained(
__magic_name__ , return_unused_kwargs=__magic_name__ )
self.assertEqual(new_configuration.hidden_size , 2 )
# This checks `_configuration_file` ia not kept in the kwargs by mistake.
self.assertDictEqual(__magic_name__ , {} )
# Testing an older version by monkey-patching the version in the module it's used.
import transformers as old_transformers
lowerCamelCase : Tuple = """v3.0.0"""
lowerCamelCase : Any = old_transformers.models.auto.AutoConfig.from_pretrained(__magic_name__ )
self.assertEqual(old_configuration.hidden_size , 7_6_8 )
| 287 | 0 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class lowerCAmelCase__ ( lowercase, unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = KandinskyInpaintPipeline
lowerCamelCase__ = ["""prompt""", """image_embeds""", """negative_image_embeds""", """image""", """mask_image"""]
lowerCamelCase__ = [
"""prompt""",
"""negative_prompt""",
"""image_embeds""",
"""negative_image_embeds""",
"""image""",
"""mask_image""",
]
lowerCamelCase__ = [
"""generator""",
"""height""",
"""width""",
"""latents""",
"""guidance_scale""",
"""negative_prompt""",
"""num_inference_steps""",
"""return_dict""",
"""guidance_scale""",
"""num_images_per_prompt""",
"""output_type""",
"""return_dict""",
]
lowerCamelCase__ = False
@property
def A_ ( self ):
return 32
@property
def A_ ( self ):
return 32
@property
def A_ ( self ):
return self.time_input_dim
@property
def A_ ( self ):
return self.time_input_dim * 4
@property
def A_ ( self ):
return 100
@property
def A_ ( self ):
_lowerCamelCase : Tuple = XLMRobertaTokenizerFast.from_pretrained('YiYiXu/tiny-random-mclip-base' )
return tokenizer
@property
def A_ ( self ):
torch.manual_seed(0 )
_lowerCamelCase : str = MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1005 , )
_lowerCamelCase : str = MultilingualCLIP(lowercase )
_lowerCamelCase : Optional[Any] = text_encoder.eval()
return text_encoder
@property
def A_ ( self ):
torch.manual_seed(0 )
_lowerCamelCase : List[str] = {
'in_channels': 9,
# Out channels is double in channels because predicts mean and variance
'out_channels': 8,
'addition_embed_type': 'text_image',
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'encoder_hid_dim': self.text_embedder_hidden_size,
'encoder_hid_dim_type': 'text_image_proj',
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': None,
}
_lowerCamelCase : Union[str, Any] = UNetaDConditionModel(**lowercase )
return model
@property
def A_ ( self ):
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def A_ ( self ):
torch.manual_seed(0 )
_lowerCamelCase : str = VQModel(**self.dummy_movq_kwargs )
return model
def A_ ( self ):
_lowerCamelCase : Optional[int] = self.dummy_text_encoder
_lowerCamelCase : Optional[Any] = self.dummy_tokenizer
_lowerCamelCase : Optional[int] = self.dummy_unet
_lowerCamelCase : Tuple = self.dummy_movq
_lowerCamelCase : Dict = DDIMScheduler(
num_train_timesteps=1000 , beta_schedule='linear' , beta_start=0.0_00_85 , beta_end=0.0_12 , clip_sample=lowercase , set_alpha_to_one=lowercase , steps_offset=1 , prediction_type='epsilon' , thresholding=lowercase , )
_lowerCamelCase : List[str] = {
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'unet': unet,
'scheduler': scheduler,
'movq': movq,
}
return components
def A_ ( self , lowercase , lowercase=0 ):
_lowerCamelCase : List[Any] = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(lowercase ) ).to(lowercase )
_lowerCamelCase : List[str] = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(lowercase )
# create init_image
_lowerCamelCase : int = floats_tensor((1, 3, 64, 64) , rng=random.Random(lowercase ) ).to(lowercase )
_lowerCamelCase : Union[str, Any] = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        _lowerCamelCase : List[str] = Image.fromarray(np.uint8(lowercase ) ).convert('RGB' ).resize((256, 256) )
# create mask
        _lowerCamelCase : Tuple = np.ones((64, 64) , dtype=np.float32 )
_lowerCamelCase : Optional[int] = 0
if str(lowercase ).startswith('mps' ):
_lowerCamelCase : List[Any] = torch.manual_seed(lowercase )
else:
_lowerCamelCase : List[Any] = torch.Generator(device=lowercase ).manual_seed(lowercase )
_lowerCamelCase : str = {
'prompt': 'horse',
'image': init_image,
'mask_image': mask,
'image_embeds': image_embeds,
'negative_image_embeds': negative_image_embeds,
'generator': generator,
'height': 64,
'width': 64,
'num_inference_steps': 2,
'guidance_scale': 4.0,
'output_type': 'np',
}
return inputs
def A_ ( self ):
_lowerCamelCase : List[Any] = 'cpu'
_lowerCamelCase : Tuple = self.get_dummy_components()
_lowerCamelCase : List[str] = self.pipeline_class(**lowercase )
_lowerCamelCase : Optional[int] = pipe.to(lowercase )
pipe.set_progress_bar_config(disable=lowercase )
_lowerCamelCase : int = pipe(**self.get_dummy_inputs(lowercase ) )
_lowerCamelCase : List[Any] = output.images
_lowerCamelCase : Dict = pipe(
**self.get_dummy_inputs(lowercase ) , return_dict=lowercase , )[0]
_lowerCamelCase : Optional[int] = image[0, -3:, -3:, -1]
_lowerCamelCase : str = image_from_tuple[0, -3:, -3:, -1]
print(F'''image.shape {image.shape}''' )
assert image.shape == (1, 64, 64, 3)
_lowerCamelCase : List[Any] = np.array(
[0.8_32_69_19, 0.73_79_04_67, 0.20_91_85_81, 0.9_30_96_12, 0.5_51_17_91, 0.43_71_33_28, 0.5_51_33_21, 0.49_92_29_34, 0.59_49_77_86] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), F''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), F''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
def A_ ( self ):
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
def A_ ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A_ ( self ):
_lowerCamelCase : List[str] = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy' )
_lowerCamelCase : Optional[int] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png' )
        _lowerCamelCase : str = np.ones((768, 768) , dtype=np.float32 )
_lowerCamelCase : Any = 0
_lowerCamelCase : Any = 'a hat'
_lowerCamelCase : Optional[int] = KandinskyPriorPipeline.from_pretrained(
            'kandinsky-community/kandinsky-2-1-prior' , torch_dtype=torch.float16 )
pipe_prior.to(lowercase )
_lowerCamelCase : List[Any] = KandinskyInpaintPipeline.from_pretrained(
            'kandinsky-community/kandinsky-2-1-inpaint' , torch_dtype=torch.float16 )
_lowerCamelCase : Optional[int] = pipeline.to(lowercase )
pipeline.set_progress_bar_config(disable=lowercase )
_lowerCamelCase : Optional[Any] = torch.Generator(device='cpu' ).manual_seed(0 )
_lowerCamelCase, _lowerCamelCase : Optional[Any] = pipe_prior(
lowercase , generator=lowercase , num_inference_steps=5 , negative_prompt='' , ).to_tuple()
_lowerCamelCase : int = pipeline(
lowercase , image=lowercase , mask_image=lowercase , image_embeds=lowercase , negative_image_embeds=lowercase , generator=lowercase , num_inference_steps=100 , height=768 , width=768 , output_type='np' , )
_lowerCamelCase : Dict = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(lowercase , lowercase ) | 96 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class A__ ( unittest.TestCase):
@slow
def UpperCamelCase__ ( self ):
lowerCamelCase : Union[str, Any] = TFCamembertModel.from_pretrained("""jplu/tf-camembert-base""" )
lowerCamelCase : Any = tf.convert_to_tensor(
            [[5, 1_2_1, 1_1, 6_6_0, 1_6, 7_3_0, 2_5_5_4_3, 1_1_0, 8_3, 6]] , dtype=tf.int32 , ) # J'aime le camembert !"
lowerCamelCase : str = model(__magic_name__ )["""last_hidden_state"""]
lowerCamelCase : Union[str, Any] = tf.TensorShape((1, 1_0, 7_6_8) )
self.assertEqual(output.shape , __magic_name__ )
# compare the actual values for a slice.
lowerCamelCase : Dict = tf.convert_to_tensor(
            [[[-0.0_254, 0.0_235, 0.1_027], [0.0_606, -0.1_811, -0.0_418], [-0.1_561, -0.1_127, 0.2_687]]] , dtype=tf.float32 , )
# camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
# camembert.eval()
# expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
| 287 | 0 |
'''simple docstring'''
import numpy as np
import datasets
_DESCRIPTION = '''
Compute the Mahalanobis Distance
Mahalonobis distance is the distance between a point and a distribution.
And not between two distinct points. It is effectively a multivariate equivalent of the Euclidean distance.
It was introduced by Prof. P. C. Mahalanobis in 1936
and has been used in various statistical applications ever since
[source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/]
'''
_CITATION = '''\
@article{de2000mahalanobis,
title={The mahalanobis distance},
author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\'e}sir{\'e} L},
journal={Chemometrics and intelligent laboratory systems},
volume={50},
number={1},
pages={1--18},
year={2000},
publisher={Elsevier}
}
'''
_KWARGS_DESCRIPTION = '''
Args:
X: List of datapoints to be compared with the `reference_distribution`.
reference_distribution: List of datapoints from the reference distribution we want to compare to.
Returns:
mahalanobis: The Mahalonobis distance for each datapoint in `X`.
Examples:
>>> mahalanobis_metric = datasets.load_metric("mahalanobis")
>>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]])
>>> print(results)
{\'mahalanobis\': array([0.5])}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowercase ( datasets.Metric ):
"""simple docstring"""
    def _info( self ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''X''': datasets.Sequence(datasets.Value('''float''' , id='''sequence''' ) , id='''X''' ),
} ) , )
    def _compute( self , X , reference_distribution ):
        '''simple docstring'''
        X = np.array(X )
        reference_distribution = np.array(reference_distribution )
        # Assert that arrays are 2D
        if len(X.shape ) != 2:
            raise ValueError('''Expected `X` to be a 2D vector''' )
        if len(reference_distribution.shape ) != 2:
            raise ValueError('''Expected `reference_distribution` to be a 2D vector''' )
        if reference_distribution.shape[0] < 2:
            raise ValueError(
                '''Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension''' )
        # Get mahalanobis distance for each prediction
        X_minus_mu = X - np.mean(reference_distribution )
        cov = np.cov(reference_distribution.T )
        try:
            inv_covmat = np.linalg.inv(cov )
        except np.linalg.LinAlgError:
            inv_covmat = np.linalg.pinv(cov )
        left_term = np.dot(X_minus_mu , inv_covmat )
        mahal_dist = np.dot(left_term , X_minus_mu.T ).diagonal()
return {"mahalanobis": mahal_dist} | 97 |
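# Sanity sketch (added; mirrors the Mahalanobis _compute body above by hand):
# with reference points (0, 1) and (1, 0) the covariance is singular, the pinv
# fallback applies, and the distance for X = (0, 1) is 0.5, matching the
# docstring example.
example_X = np.array([[0.0, 1.0]])
example_ref = np.array([[0.0, 1.0], [1.0, 0.0]])
example_delta = example_X - np.mean(example_ref)
example_cov = np.cov(example_ref.T)  # singular here
example_dist = np.dot(np.dot(example_delta, np.linalg.pinv(example_cov)), example_delta.T).diagonal()
assert np.allclose(example_dist, [0.5])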
import bza
import gzip
import lzma
import os
import shutil
import struct
import tarfile
import warnings
import zipfile
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict, List, Optional, Type, Union
from .. import config
from .filelock import FileLock
from .logging import get_logger
_lowerCamelCase =get_logger(__name__)
class A__ :
def __init__( self , __magic_name__ = None ):
lowerCamelCase : Dict = (
os.path.join(__magic_name__ , config.EXTRACTED_DATASETS_DIR ) if cache_dir else config.EXTRACTED_DATASETS_PATH
)
lowerCamelCase : List[str] = Extractor
def UpperCamelCase__ ( self , __magic_name__ ):
from .file_utils import hash_url_to_filename
# Path where we extract compressed archives
# We extract in the cache dir, and get the extracted path name by hashing the original path"
lowerCamelCase : int = os.path.abspath(__magic_name__ )
return os.path.join(self.extract_dir , hash_url_to_filename(__magic_name__ ) )
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ ):
return force_extract or (
not os.path.isfile(__magic_name__ ) and not (os.path.isdir(__magic_name__ ) and os.listdir(__magic_name__ ))
)
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ = False ):
lowerCamelCase : Union[str, Any] = self.extractor.infer_extractor_format(__magic_name__ )
if not extractor_format:
return input_path
lowerCamelCase : int = self._get_output_path(__magic_name__ )
if self._do_extract(__magic_name__ , __magic_name__ ):
self.extractor.extract(__magic_name__ , __magic_name__ , __magic_name__ )
return output_path
class A__ ( __SCREAMING_SNAKE_CASE):
@classmethod
@abstractmethod
def UpperCamelCase__ ( cls , __magic_name__ , **__magic_name__ ):
...
@staticmethod
@abstractmethod
def UpperCamelCase__ ( __magic_name__ , __magic_name__ ):
...
class A__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : List[bytes] = []
@staticmethod
def UpperCamelCase__ ( __magic_name__ , __magic_name__ ):
with open(__magic_name__ , """rb""" ) as f:
return f.read(__magic_name__ )
@classmethod
def UpperCamelCase__ ( cls , __magic_name__ , __magic_name__ = b"" ):
if not magic_number:
lowerCamelCase : Optional[Any] = max(len(__magic_name__ ) for cls_magic_number in cls.magic_numbers )
try:
lowerCamelCase : Tuple = cls.read_magic_number(__magic_name__ , __magic_name__ )
except OSError:
return False
return any(magic_number.startswith(__magic_name__ ) for cls_magic_number in cls.magic_numbers )
class A__ ( __SCREAMING_SNAKE_CASE):
@classmethod
def UpperCamelCase__ ( cls , __magic_name__ , **__magic_name__ ):
return tarfile.is_tarfile(__magic_name__ )
@staticmethod
def UpperCamelCase__ ( __magic_name__ , __magic_name__ ):
def resolved(__magic_name__ ) -> str:
return os.path.realpath(os.path.abspath(__magic_name__ ) )
def badpath(__magic_name__ , __magic_name__ ) -> bool:
# joinpath will ignore base if path is absolute
return not resolved(os.path.join(__magic_name__ , __magic_name__ ) ).startswith(__magic_name__ )
def badlink(__magic_name__ , __magic_name__ ) -> bool:
# Links are interpreted relative to the directory containing the link
lowerCamelCase : List[str] = resolved(os.path.join(__magic_name__ , os.path.dirname(info.name ) ) )
return badpath(info.linkname , base=__magic_name__ )
lowerCamelCase : Optional[Any] = resolved(__magic_name__ )
for finfo in members:
if badpath(finfo.name , __magic_name__ ):
logger.error(F'''Extraction of {finfo.name} is blocked (illegal path)''' )
elif finfo.issym() and badlink(__magic_name__ , __magic_name__ ):
logger.error(F'''Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}''' )
elif finfo.islnk() and badlink(__magic_name__ , __magic_name__ ):
logger.error(F'''Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}''' )
else:
yield finfo
@staticmethod
def UpperCamelCase__ ( __magic_name__ , __magic_name__ ):
os.makedirs(__magic_name__ , exist_ok=__magic_name__ )
lowerCamelCase : Dict = tarfile.open(__magic_name__ )
tar_file.extractall(__magic_name__ , members=TarExtractor.safemembers(__magic_name__ , __magic_name__ ) )
tar_file.close()
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : str = [B"""\x1F\x8B"""]
@staticmethod
def UpperCamelCase__ ( __magic_name__ , __magic_name__ ):
with gzip.open(__magic_name__ , """rb""" ) as gzip_file:
with open(__magic_name__ , """wb""" ) as extracted_file:
shutil.copyfileobj(__magic_name__ , __magic_name__ )
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : Optional[int] = [
B"""PK\x03\x04""",
B"""PK\x05\x06""", # empty archive
B"""PK\x07\x08""", # spanned archive
]
@classmethod
def UpperCamelCase__ ( cls , __magic_name__ , __magic_name__ = b"" ):
if super().is_extractable(__magic_name__ , magic_number=__magic_name__ ):
return True
try:
# Alternative version of zipfile.is_zipfile that has less false positives, but misses executable zip archives.
# From: https://github.com/python/cpython/pull/5053
from zipfile import (
_CD_SIGNATURE,
_ECD_DISK_NUMBER,
_ECD_DISK_START,
_ECD_ENTRIES_TOTAL,
_ECD_OFFSET,
_ECD_SIZE,
_EndRecData,
sizeCentralDir,
stringCentralDir,
structCentralDir,
)
with open(__magic_name__ , """rb""" ) as fp:
lowerCamelCase : List[str] = _EndRecData(__magic_name__ )
if endrec:
if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0:
return True # Empty zipfiles are still zipfiles
elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]:
fp.seek(endrec[_ECD_OFFSET] ) # Central directory is on the same disk
if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir:
lowerCamelCase : List[Any] = fp.read(__magic_name__ ) # CD is where we expect it to be
if len(__magic_name__ ) == sizeCentralDir:
lowerCamelCase : str = struct.unpack(__magic_name__ , __magic_name__ ) # CD is the right size
if centdir[_CD_SIGNATURE] == stringCentralDir:
return True # First central directory entry has correct magic number
return False
except Exception: # catch all errors in case future python versions change the zipfile internals
return False
@staticmethod
def UpperCamelCase__ ( __magic_name__ , __magic_name__ ):
os.makedirs(__magic_name__ , exist_ok=__magic_name__ )
with zipfile.ZipFile(__magic_name__ , """r""" ) as zip_file:
zip_file.extractall(__magic_name__ )
zip_file.close()
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : List[str] = [B"""\xFD\x37\x7A\x58\x5A\x00"""]
@staticmethod
def UpperCamelCase__ ( __magic_name__ , __magic_name__ ):
with lzma.open(__magic_name__ ) as compressed_file:
with open(__magic_name__ , """wb""" ) as extracted_file:
shutil.copyfileobj(__magic_name__ , __magic_name__ )
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : Any = [B"""Rar!\x1a\x07\x00""", B"""Rar!\x1a\x07\x01\x00"""] # RAR_ID # RAR5_ID
@staticmethod
def UpperCamelCase__ ( __magic_name__ , __magic_name__ ):
if not config.RARFILE_AVAILABLE:
raise ImportError("""Please pip install rarfile""" )
import rarfile
os.makedirs(__magic_name__ , exist_ok=__magic_name__ )
lowerCamelCase : Union[str, Any] = rarfile.RarFile(__magic_name__ )
rf.extractall(__magic_name__ )
rf.close()
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : Tuple = [B"""\x28\xb5\x2F\xFD"""]
@staticmethod
def UpperCamelCase__ ( __magic_name__ , __magic_name__ ):
if not config.ZSTANDARD_AVAILABLE:
raise ImportError("""Please pip install zstandard""" )
import zstandard as zstd
lowerCamelCase : int = zstd.ZstdDecompressor()
with open(__magic_name__ , """rb""" ) as ifh, open(__magic_name__ , """wb""" ) as ofh:
dctx.copy_stream(__magic_name__ , __magic_name__ )
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : Any = [B"""\x42\x5A\x68"""]
@staticmethod
def UpperCamelCase__ ( __magic_name__ , __magic_name__ ):
with bza.open(__magic_name__ , """rb""" ) as compressed_file:
with open(__magic_name__ , """wb""" ) as extracted_file:
shutil.copyfileobj(__magic_name__ , __magic_name__ )
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : List[Any] = [B"""\x37\x7A\xBC\xAF\x27\x1C"""]
@staticmethod
def UpperCamelCase__ ( __magic_name__ , __magic_name__ ):
if not config.PY7ZR_AVAILABLE:
raise ImportError("""Please pip install py7zr""" )
        import py7zr
os.makedirs(__magic_name__ , exist_ok=__magic_name__ )
        with py7zr.SevenZipFile(__magic_name__ , """r""" ) as archive:
archive.extractall(__magic_name__ )
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : List[Any] = [B"""\x04\x22\x4D\x18"""]
@staticmethod
def UpperCamelCase__ ( __magic_name__ , __magic_name__ ):
if not config.LZ4_AVAILABLE:
raise ImportError("""Please pip install lz4""" )
        import lz4.frame
        with lz4.frame.open(__magic_name__ , """rb""" ) as compressed_file:
with open(__magic_name__ , """wb""" ) as extracted_file:
shutil.copyfileobj(__magic_name__ , __magic_name__ )
class A__ :
# Put zip file to the last, b/c it is possible wrongly detected as zip (I guess it means: as tar or gzip)
_UpperCAmelCase : Dict[str, Type[BaseExtractor]] = {
"tar": TarExtractor,
"gzip": GzipExtractor,
"zip": ZipExtractor,
"xz": XzExtractor,
"rar": RarExtractor,
"zstd": ZstdExtractor,
"bz2": BzipaExtractor,
"7z": SevenZipExtractor, # <Added version="2.4.0"/>
"lz4": LzaExtractor, # <Added version="2.4.0"/>
}
@classmethod
def UpperCamelCase__ ( cls ):
return max(
len(__magic_name__ )
for extractor in cls.extractors.values()
if issubclass(__magic_name__ , __magic_name__ )
for extractor_magic_number in extractor.magic_numbers )
@staticmethod
def UpperCamelCase__ ( __magic_name__ , __magic_name__ ):
try:
return MagicNumberBaseExtractor.read_magic_number(__magic_name__ , magic_number_length=__magic_name__ )
except OSError:
return b""
@classmethod
def UpperCamelCase__ ( cls , __magic_name__ , __magic_name__ = False ):
warnings.warn(
"""Method 'is_extractable' was deprecated in version 2.4.0 and will be removed in 3.0.0. """
"""Use 'infer_extractor_format' instead.""" , category=__magic_name__ , )
lowerCamelCase : int = cls.infer_extractor_format(__magic_name__ )
if extractor_format:
return True if not return_extractor else (True, cls.extractors[extractor_format])
return False if not return_extractor else (False, None)
@classmethod
def UpperCamelCase__ ( cls , __magic_name__ ): # <Added version="2.4.0"/>
lowerCamelCase : Dict = cls._get_magic_number_max_length()
lowerCamelCase : Optional[Any] = cls._read_magic_number(__magic_name__ , __magic_name__ )
for extractor_format, extractor in cls.extractors.items():
if extractor.is_extractable(__magic_name__ , magic_number=__magic_name__ ):
return extractor_format
@classmethod
def UpperCamelCase__ ( cls , __magic_name__ , __magic_name__ , __magic_name__ = None , __magic_name__ = "deprecated" , ):
os.makedirs(os.path.dirname(__magic_name__ ) , exist_ok=__magic_name__ )
# Prevent parallel extractions
lowerCamelCase : Tuple = str(Path(__magic_name__ ).with_suffix(""".lock""" ) )
with FileLock(__magic_name__ ):
shutil.rmtree(__magic_name__ , ignore_errors=__magic_name__ )
if extractor_format or extractor != "deprecated":
if extractor != "deprecated" or not isinstance(__magic_name__ , __magic_name__ ): # passed as positional arg
warnings.warn(
"""Parameter 'extractor' was deprecated in version 2.4.0 and will be removed in 3.0.0. """
"""Use 'extractor_format' instead.""" , category=__magic_name__ , )
lowerCamelCase : int = extractor if extractor != """deprecated""" else extractor_format
else:
lowerCamelCase : Optional[int] = cls.extractors[extractor_format]
return extractor.extract(__magic_name__ , __magic_name__ )
else:
warnings.warn(
"""Parameter 'extractor_format' was made required in version 2.4.0 and not passing it will raise an """
"""exception in 3.0.0.""" , category=__magic_name__ , )
for extractor in cls.extractors.values():
if extractor.is_extractable(__magic_name__ ):
return extractor.extract(__magic_name__ , __magic_name__ )
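# Minimal sketch (added) of the magic-number sniffing the extractors above rely
# on: write a real gzip file and confirm its first two bytes match the gzip
# signature b"\x1F\x8B". Names prefixed with example_ are illustrative only.
import tempfile as example_tempfile
example_path = os.path.join(example_tempfile.mkdtemp(), "sample.txt.gz")
with gzip.open(example_path, "wb") as example_f:
    example_f.write(b"hello")
with open(example_path, "rb") as example_f:
    assert example_f.read(2) == b"\x1F\x8B"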
| 287 | 0 |
"""simple docstring"""
def solution( min_total = 1_0**1_2 ):
    prev_numerator = 1
    prev_denominator = 0
    numerator = 1
    denominator = 1
    while numerator <= 2 * min_total - 1:
        prev_numerator += 2 * numerator
        numerator += 2 * prev_numerator
        prev_denominator += 2 * denominator
        denominator += 2 * prev_denominator
    return (denominator + 1) // 2
if __name__ == "__main__":
print(F"""{solution() = }""")
| 98 |
import json
import os
import tempfile
import transformers
import datasets
from utils import generate_example_dataset, get_duration
SPEED_TEST_N_EXAMPLES = 5_0_0_0_0_0
RESULTS_BASEPATH , RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, """results""", RESULTS_FILENAME.replace(""".py""", """.json"""))
# Note: map and filter below intentionally shadow the builtins within this script.
@get_duration
def map( dataset, **kwargs ):
    _ = dataset.map(**kwargs )
@get_duration
def filter( dataset, **kwargs ):
    _ = dataset.filter(**kwargs )
def benchmark_map_filter( ):
    times = {"""num examples""": SPEED_TEST_N_EXAMPLES}
    with tempfile.TemporaryDirectory() as tmp_dir:
        features = datasets.Features({"""text""": datasets.Value("""string""" ), """numbers""": datasets.Value("""float32""" )} )
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, """dataset.arrow""" ), features, num_examples=SPEED_TEST_N_EXAMPLES )
        tokenizer = transformers.AutoTokenizer.from_pretrained("""bert-base-cased""", use_fast=True )
        def tokenize(examples ):
            return tokenizer(examples["""text"""] )
        times["map identity"] = map(dataset )
        times["map identity batched"] = map(dataset, batched=True )
        times["map no-op batched"] = map(dataset, function=lambda x: None, batched=True )
        with dataset.formatted_as(type="""numpy""" ):
            times["map no-op batched numpy"] = map(dataset, function=lambda x: None, batched=True )
        with dataset.formatted_as(type="""pandas""" ):
            times["map no-op batched pandas"] = map(dataset, function=lambda x: None, batched=True )
        with dataset.formatted_as(type="""torch""", columns="""numbers""" ):
            times["map no-op batched pytorch"] = map(dataset, function=lambda x: None, batched=True )
        with dataset.formatted_as(type="""tensorflow""", columns="""numbers""" ):
            times["map no-op batched tensorflow"] = map(dataset, function=lambda x: None, batched=True )
        times["map fast-tokenizer batched"] = map(dataset, function=tokenize, batched=True )
        times["filter"] = filter(dataset )
        # Activate later when tokenizer support batched inputs
        # with dataset.formatted_as(type='numpy'):
        #     times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)
    with open(RESULTS_FILE_PATH, """wb""" ) as f:
        f.write(json.dumps(times ).encode("""utf-8""" ) )
if __name__ == "__main__": # useful to run the profiler
benchmark_map_filter()
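# For context (added sketch, an assumption rather than part of this benchmark):
# `get_duration` from the local `utils` module is expected to behave like the
# timing decorator below, returning the wall-clock duration of the wrapped call
# so it can be stored in the `times` dict above. `_get_duration_sketch` is a
# hypothetical stand-in name.
import functools
import timeit


def _get_duration_sketch(func):
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        start = timeit.default_timer()
        func(*args, **kwargs)
        # return elapsed seconds instead of the function's result
        return timeit.default_timer() - start

    return wrapper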
| 287 | 0 |
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class DPTModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        image_size=32,
        patch_size=16,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=4,
        backbone_out_indices=[0, 1, 2, 3],
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        initializer_range=0.02,
        num_labels=3,
        backbone_featmap_shape=[1, 384, 24, 24],
        is_hybrid=True,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.backbone_out_indices = backbone_out_indices
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.backbone_featmap_shape = backbone_featmap_shape
        self.scope = scope
        self.is_hybrid = is_hybrid
        # sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token);
        # with the defaults above: (32 // 16) ** 2 + 1 = 5
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        backbone_config = {
            "global_padding": "same",
            "layer_type": "bottleneck",
            "depths": [3, 4, 9],
            "out_features": ["stage1", "stage2", "stage3"],
            "embedding_dynamic_padding": True,
            "hidden_sizes": [96, 192, 384, 768],
            "num_groups": 2,
        }

        return DPTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            backbone_out_indices=self.backbone_out_indices,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            is_hybrid=self.is_hybrid,
            backbone_config=backbone_config,
            backbone_featmap_shape=self.backbone_featmap_shape,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = DPTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_depth_estimation(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = DPTForDepthEstimation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.predicted_depth.shape, (self.batch_size, self.image_size, self.image_size))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = DPTForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size)
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
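# Note (added): the `backbone_config` dict in `get_config` above describes a
# small BiT-style convolutional backbone used by the hybrid DPT variant, and
# `backbone_featmap_shape` = [1, 384, 24, 24] is the feature-map shape the
# ViT-hybrid embeddings expect from it.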
@require_torch
class DPTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "depth-estimation": DPTForDepthEstimation,
            "feature-extraction": DPTModel,
            "image-segmentation": DPTForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = DPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DPTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="DPT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_depth_estimation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_depth_estimation(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    def test_training(self):
        for model_class in self.all_model_classes:
            if model_class.__name__ == "DPTForDepthEstimation":
                continue

            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True

            if model_class in get_values(MODEL_MAPPING):
                continue

            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        for model_class in self.all_model_classes:
            if model_class.__name__ == "DPTForDepthEstimation":
                continue

            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.use_cache = False
            config.return_dict = True

            if model_class in get_values(MODEL_MAPPING) or not model_class.supports_gradient_checkpointing:
                continue

            model = model_class(config)
            model.to(torch_device)
            model.gradient_checkpointing_enable()
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_initialization(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            # Skip the check for the backbone
            backbone_params = []
            for name, module in model.named_modules():
                if module.__class__.__name__ == "DPTViTHybridEmbeddings":
                    backbone_params = [f"{name}.{key}" for key in module.state_dict().keys()]
                    break

            for name, param in model.named_parameters():
                if param.requires_grad:
                    if name in backbone_params:
                        continue
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
            model = DPTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_raise_readout_type(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        config.readout_type = "add"
        with self.assertRaises(ValueError):
            _ = DPTForDepthEstimation(config)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
@slow
class DPTModelIntegrationTest(unittest.TestCase):
    def test_inference_depth_estimation(self):
        image_processor = DPTImageProcessor.from_pretrained("Intel/dpt-hybrid-midas")
        model = DPTForDepthEstimation.from_pretrained("Intel/dpt-hybrid-midas").to(torch_device)

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
            predicted_depth = outputs.predicted_depth

        # verify the predicted depth
        expected_shape = torch.Size((1, 384, 384))
        self.assertEqual(predicted_depth.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[5.6437, 5.6146, 5.6511], [5.4371, 5.5649, 5.5958], [5.5215, 5.5184, 5.5293]]]
        ).to(torch_device)

        self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 100, expected_slice, atol=1e-4))
| 99 |
def lucas_lehmer_test(p: int) -> bool:
    """Return True iff the Mersenne number 2**p - 1 is prime (p itself must be prime)."""
    if p < 2:
        raise ValueError("p should not be less than 2!")
    elif p == 2:
        return True

    s = 4
    m = (1 << p) - 1
    for _ in range(p - 2):
        s = ((s * s) - 2) % m
    return s == 0


if __name__ == "__main__":
    print(lucas_lehmer_test(7))
    print(lucas_lehmer_test(11))
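    # Added cross-check (illustration): the prime exponents below 20 that yield
    # Mersenne primes are 2, 3, 5, 7, 13, 17 and 19 — note that 11 is absent,
    # since 2**11 - 1 = 2047 = 23 * 89 is composite.
    print([p for p in (2, 3, 5, 7, 11, 13, 17, 19) if lucas_lehmer_test(p)])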
| 287 | 0 |
"""simple docstring"""
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import (
BaseOutput,
OptionalDependencyNotAvailable,
is_flax_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_onnx_available,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
@dataclass
class StableDiffusionPipelineOutput(BaseOutput):
    """Output class for Stable Diffusion pipelines."""

    images: Union[List[PIL.Image.Image], np.ndarray]
    nsfw_content_detected: Optional[List[bool]]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
    from .pipeline_cycle_diffusion import CycleDiffusionPipeline
    from .pipeline_stable_diffusion import StableDiffusionPipeline
    from .pipeline_stable_diffusion_attend_and_excite import StableDiffusionAttendAndExcitePipeline
    from .pipeline_stable_diffusion_img2img import StableDiffusionImg2ImgPipeline
    from .pipeline_stable_diffusion_inpaint import StableDiffusionInpaintPipeline
    from .pipeline_stable_diffusion_inpaint_legacy import StableDiffusionInpaintPipelineLegacy
    from .pipeline_stable_diffusion_instruct_pix2pix import StableDiffusionInstructPix2PixPipeline
    from .pipeline_stable_diffusion_latent_upscale import StableDiffusionLatentUpscalePipeline
    from .pipeline_stable_diffusion_ldm3d import StableDiffusionLDM3DPipeline
    from .pipeline_stable_diffusion_model_editing import StableDiffusionModelEditingPipeline
    from .pipeline_stable_diffusion_panorama import StableDiffusionPanoramaPipeline
    from .pipeline_stable_diffusion_paradigms import StableDiffusionParadigmsPipeline
    from .pipeline_stable_diffusion_sag import StableDiffusionSAGPipeline
    from .pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
    from .pipeline_stable_unclip import StableUnCLIPPipeline
    from .pipeline_stable_unclip_img2img import StableUnCLIPImg2ImgPipeline
    from .safety_checker import StableDiffusionSafetyChecker
    from .stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
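# Example usage (added sketch, not part of this __init__): the pipelines
# re-exported above are typically consumed via the top-level package, e.g.
#
#   from diffusers import StableDiffusionPipeline
#   pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
#   image = pipe("an astronaut riding a horse on mars").images[0]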
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import StableDiffusionImageVariationPipeline
else:
from .pipeline_stable_diffusion_image_variation import StableDiffusionImageVariationPipeline
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.26.0")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import (
        StableDiffusionDepth2ImgPipeline,
        StableDiffusionDiffEditPipeline,
        StableDiffusionPix2PixZeroPipeline,
    )
else:
    from .pipeline_stable_diffusion_depth2img import StableDiffusionDepth2ImgPipeline
    from .pipeline_stable_diffusion_diffedit import StableDiffusionDiffEditPipeline
    from .pipeline_stable_diffusion_pix2pix_zero import StableDiffusionPix2PixZeroPipeline
try:
if not (
is_torch_available()
and is_transformers_available()
and is_k_diffusion_available()
and is_k_diffusion_version(">=", "0.0.12")
):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipeline_stable_diffusion_k_diffusion import StableDiffusionKDiffusionPipeline
try:
if not (is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_onnx_objects import * # noqa F403
else:
    from .pipeline_onnx_stable_diffusion import OnnxStableDiffusionPipeline, StableDiffusionOnnxPipeline
    from .pipeline_onnx_stable_diffusion_img2img import OnnxStableDiffusionImg2ImgPipeline
    from .pipeline_onnx_stable_diffusion_inpaint import OnnxStableDiffusionInpaintPipeline
    from .pipeline_onnx_stable_diffusion_inpaint_legacy import OnnxStableDiffusionInpaintPipelineLegacy
    from .pipeline_onnx_stable_diffusion_upscale import OnnxStableDiffusionUpscalePipeline
if is_transformers_available() and is_flax_available():
import flax
    @flax.struct.dataclass
    class FlaxStableDiffusionPipelineOutput(BaseOutput):
        """Output class for Flax-based Stable Diffusion pipelines."""

        images: np.ndarray
        nsfw_content_detected: List[bool]

    from ...schedulers.scheduling_pndm_flax import PNDMSchedulerState
    from .pipeline_flax_stable_diffusion import FlaxStableDiffusionPipeline
    from .pipeline_flax_stable_diffusion_img2img import FlaxStableDiffusionImg2ImgPipeline
    from .pipeline_flax_stable_diffusion_inpaint import FlaxStableDiffusionInpaintPipeline
    from .safety_checker_flax import FlaxStableDiffusionSafetyChecker
| 100 |
import warnings
from ...utils import logging
from .image_processing_owlvit import OwlViTImageProcessor
_lowerCamelCase =logging.get_logger(__name__)
class OwlViTFeatureExtractor(OwlViTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use OwlViTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
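# Usage note (added): constructing the deprecated class still works and simply
# forwards to OwlViTImageProcessor, but emits a FutureWarning, e.g.
#
#   import warnings
#   with warnings.catch_warnings(record=True) as caught:
#       warnings.simplefilter("always")
#       _ = OwlViTFeatureExtractor()
#       assert any(issubclass(w.category, FutureWarning) for w in caught)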
| 287 | 0 |