import numpy as np
import torch
from torch.utils.data import DataLoader

from accelerate.utils.dataclasses import DistributedType


class RegressionDataset:
    def __init__(self, a=2, b=3, length=64, seed=None):
        rng = np.random.default_rng(seed)
        self.length = length
        self.x = rng.normal(size=(length,)).astype(np.float32)
        self.y = a * self.x + b + rng.normal(scale=0.1, size=(length,)).astype(np.float32)

    def __len__(self):
        return self.length

    def __getitem__(self, i):
        return {"x": self.x[i], "y": self.y[i]}


class RegressionModel4XPU(torch.nn.Module):
    def __init__(self, a=0, b=0, double_output=False):
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor([2, 3]).float())
        self.b = torch.nn.Parameter(torch.tensor([2, 3]).float())
        self.first_batch = True

    def forward(self, x=None):
        if self.first_batch:
            print(f"Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}")
            self.first_batch = False
        return x * self.a[0] + self.b[0]


class RegressionModel(torch.nn.Module):
    def __init__(self, a=0, b=0, double_output=False):
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor(a).float())
        self.b = torch.nn.Parameter(torch.tensor(b).float())
        self.first_batch = True

    def forward(self, x=None):
        if self.first_batch:
            print(f"Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}")
            self.first_batch = False
        return x * self.a + self.b


def mocked_dataloaders(accelerator, batch_size: int = 16):
    from datasets import load_dataset
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    data_files = {"train": "tests/test_samples/MRPC/train.csv", "validation": "tests/test_samples/MRPC/dev.csv"}
    datasets = load_dataset("csv", data_files=data_files)
    label_list = datasets["train"].unique("label")
    label_to_id = {v: i for i, v in enumerate(label_list)}

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(
            examples["sentence1"], examples["sentence2"], truncation=True, max_length=None, padding="max_length"
        )
        if "label" in examples:
            outputs["labels"] = [label_to_id[l] for l in examples["label"]]
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function,
        batched=True,
        remove_columns=["sentence1", "sentence2", "label"],
    )

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=2)
    eval_dataloader = DataLoader(tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=1)

    return train_dataloader, eval_dataloader
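
# A minimal usage sketch (not part of the original module): wire the regression
# dataset to the regression model through a plain DataLoader. The names below
# (`dataset`, `loader`, `model`) are illustrative only.
if __name__ == "__main__":
    dataset = RegressionDataset(a=2, b=3, length=64, seed=42)
    loader = DataLoader(dataset, batch_size=8)
    model = RegressionModel(a=1.0, b=1.0)
    for batch in loader:
        preds = model(batch["x"])  # the first forward pass also prints the dtypes
        loss = torch.nn.functional.mse_loss(preds, batch["y"])
        break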
from typing import Any


def viterbi(
    observations_space: list,
    states_space: list,
    initial_probabilities: dict,
    transition_probabilities: dict,
    emission_probabilities: dict,
) -> list:
    """Viterbi algorithm: finds the most likely sequence of hidden states for the observations."""
    _validation(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    # Creates data structures and fill initial step
    probabilities: dict = {}
    pointers: dict = {}
    for state in states_space:
        observation = observations_space[0]
        probabilities[(state, observation)] = (
            initial_probabilities[state] * emission_probabilities[state][observation]
        )
        pointers[(state, observation)] = None

    # Fills the data structure with the probabilities of
    # different transitions and pointers to previous states
    for o in range(1, len(observations_space)):
        observation = observations_space[o]
        prior_observation = observations_space[o - 1]
        for state in states_space:
            # Calculates the argmax for probability function
            arg_max = ""
            max_probability = -1
            for k_state in states_space:
                probability = (
                    probabilities[(k_state, prior_observation)]
                    * transition_probabilities[k_state][state]
                    * emission_probabilities[state][observation]
                )
                if probability > max_probability:
                    max_probability = probability
                    arg_max = k_state

            # Update probabilities and pointers dicts
            probabilities[(state, observation)] = (
                probabilities[(arg_max, prior_observation)]
                * transition_probabilities[arg_max][state]
                * emission_probabilities[state][observation]
            )
            pointers[(state, observation)] = arg_max

    # The final observation
    final_observation = observations_space[len(observations_space) - 1]

    # argmax for given final observation
    arg_max = ""
    max_probability = -1
    for k_state in states_space:
        probability = probabilities[(k_state, final_observation)]
        if probability > max_probability:
            max_probability = probability
            arg_max = k_state
    last_state = arg_max

    # Process pointers backwards
    previous = last_state
    result = []
    for o in range(len(observations_space) - 1, -1, -1):
        result.append(previous)
        previous = pointers[previous, observations_space[o]]

    result.reverse()
    return result


def _validation(
    observations_space: Any,
    states_space: Any,
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    _validate_not_empty(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    _validate_lists(observations_space, states_space)
    _validate_dicts(initial_probabilities, transition_probabilities, emission_probabilities)


def _validate_not_empty(
    observations_space: Any,
    states_space: Any,
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    if not all(
        [
            observations_space,
            states_space,
            initial_probabilities,
            transition_probabilities,
            emission_probabilities,
        ]
    ):
        raise ValueError("There's an empty parameter")


def _validate_lists(observations_space: Any, states_space: Any) -> None:
    _validate_list(observations_space, "observations_space")
    _validate_list(states_space, "states_space")


def _validate_list(_object: Any, var_name: str) -> None:
    if not isinstance(_object, list):
        raise ValueError(f"{var_name} must be a list")
    else:
        for x in _object:
            if not isinstance(x, str):
                raise ValueError(f"{var_name} must be a list of strings")


def _validate_dicts(
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    _validate_dict(initial_probabilities, "initial_probabilities", float)
    _validate_nested_dict(transition_probabilities, "transition_probabilities")
    _validate_nested_dict(emission_probabilities, "emission_probabilities")


def _validate_nested_dict(_object: Any, var_name: str) -> None:
    _validate_dict(_object, var_name, dict)
    for x in _object.values():
        _validate_dict(x, var_name, float, True)


def _validate_dict(_object: Any, var_name: str, value_type: type, nested: bool = False) -> None:
    if not isinstance(_object, dict):
        raise ValueError(f"{var_name} must be a dict")
    if not all(isinstance(x, str) for x in _object):
        raise ValueError(f"{var_name} all keys must be strings")
    if not all(isinstance(x, value_type) for x in _object.values()):
        nested_text = "nested dictionary " if nested else ""
        raise ValueError(f"{var_name} {nested_text}all values must be {value_type.__name__}")
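
# A small usage sketch (hypothetical inputs, added for illustration): the classic
# two-state weather HMM. Working the recursion through by hand, the most likely
# hidden sequence for these observations is ["rainy", "rainy", "sunny"].
#
#     viterbi(
#         ["umbrella", "umbrella", "no umbrella"],
#         ["rainy", "sunny"],
#         {"rainy": 0.6, "sunny": 0.4},
#         {"rainy": {"rainy": 0.7, "sunny": 0.3}, "sunny": {"rainy": 0.3, "sunny": 0.7}},
#         {"rainy": {"umbrella": 0.9, "no umbrella": 0.1}, "sunny": {"umbrella": 0.2, "no umbrella": 0.8}},
#     )
#     # -> ['rainy', 'rainy', 'sunny']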
if __name__ == "__main__":
from doctest import testmod
testmod()
"""simple docstring"""
import unittest
import numpy as np
from transformers import MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING, TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
from transformers.pipelines import AudioClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_torchaudio,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class __lowercase ( unittest.TestCase):
"""simple docstring"""
_A : str = MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
_A : List[str] = TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
def __UpperCamelCase (self , lowercase__ , lowercase__ , lowercase__ ):
snake_case_ : Dict = AudioClassificationPipeline(model=_lowerCamelCase , feature_extractor=_lowerCamelCase )
# test with a raw waveform
snake_case_ : Tuple = np.zeros((3_40_00,) )
snake_case_ : List[Any] = np.zeros((1_40_00,) )
return audio_classifier, [audioa, audio]
def __UpperCamelCase (self , lowercase__ , lowercase__ ):
snake_case_ , snake_case_ : Any = examples
snake_case_ : Dict = audio_classifier(_lowerCamelCase )
# by default a model is initialized with num_labels=2
self.assertEqual(
_lowerCamelCase , [
{"""score""": ANY(_lowerCamelCase ), """label""": ANY(_lowerCamelCase )},
{"""score""": ANY(_lowerCamelCase ), """label""": ANY(_lowerCamelCase )},
] , )
snake_case_ : Optional[Any] = audio_classifier(_lowerCamelCase , top_k=1 )
self.assertEqual(
_lowerCamelCase , [
{"""score""": ANY(_lowerCamelCase ), """label""": ANY(_lowerCamelCase )},
] , )
self.run_torchaudio(_lowerCamelCase )
@require_torchaudio
def __UpperCamelCase (self , lowercase__ ):
import datasets
# test with a local file
snake_case_ : Tuple = datasets.load_dataset("""hf-internal-testing/librispeech_asr_dummy""" , """clean""" , split="""validation""" )
snake_case_ : str = dataset[0]["""audio"""]["""array"""]
snake_case_ : Optional[Any] = audio_classifier(_lowerCamelCase )
self.assertEqual(
_lowerCamelCase , [
{"""score""": ANY(_lowerCamelCase ), """label""": ANY(_lowerCamelCase )},
{"""score""": ANY(_lowerCamelCase ), """label""": ANY(_lowerCamelCase )},
] , )
@require_torch
def __UpperCamelCase (self ):
snake_case_ : Union[str, Any] = """anton-l/wav2vec2-random-tiny-classifier"""
snake_case_ : Optional[Any] = pipeline("""audio-classification""" , model=_lowerCamelCase )
snake_case_ : Dict = np.ones((80_00,) )
snake_case_ : int = audio_classifier(_lowerCamelCase , top_k=4 )
snake_case_ : Tuple = [
{"""score""": 0.0842, """label""": """no"""},
{"""score""": 0.0838, """label""": """up"""},
{"""score""": 0.0837, """label""": """go"""},
{"""score""": 0.0834, """label""": """right"""},
]
snake_case_ : List[Any] = [
{"""score""": 0.0845, """label""": """stop"""},
{"""score""": 0.0844, """label""": """on"""},
{"""score""": 0.0841, """label""": """right"""},
{"""score""": 0.0834, """label""": """left"""},
]
self.assertIn(nested_simplify(_lowerCamelCase , decimals=4 ) , [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2] )
snake_case_ : str = {"""array""": np.ones((80_00,) ), """sampling_rate""": audio_classifier.feature_extractor.sampling_rate}
snake_case_ : Optional[int] = audio_classifier(_lowerCamelCase , top_k=4 )
self.assertIn(nested_simplify(_lowerCamelCase , decimals=4 ) , [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2] )
@require_torch
@slow
def __UpperCamelCase (self ):
import datasets
snake_case_ : Optional[int] = """superb/wav2vec2-base-superb-ks"""
snake_case_ : Optional[Any] = pipeline("""audio-classification""" , model=_lowerCamelCase )
snake_case_ : Tuple = datasets.load_dataset("""anton-l/superb_dummy""" , """ks""" , split="""test""" )
snake_case_ : Optional[Any] = np.array(dataset[3]["""speech"""] , dtype=np.floataa )
snake_case_ : Tuple = audio_classifier(_lowerCamelCase , top_k=4 )
self.assertEqual(
nested_simplify(_lowerCamelCase , decimals=3 ) , [
{"""score""": 0.981, """label""": """go"""},
{"""score""": 0.007, """label""": """up"""},
{"""score""": 0.006, """label""": """_unknown_"""},
{"""score""": 0.001, """label""": """down"""},
] , )
@require_tf
@unittest.skip("""Audio classification is not implemented for TF""" )
def __UpperCamelCase (self ):
pass
"""simple docstring"""
import random
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : int ):
"""simple docstring"""
snake_case_ : Union[str, Any] = num - 1
snake_case_ : List[str] = 0
while s % 2 == 0:
snake_case_ : str = s // 2
t += 1
for _ in range(5 ):
snake_case_ : List[Any] = random.randrange(2 , num - 1 )
snake_case_ : Dict = pow(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if v != 1:
snake_case_ : int = 0
while v != (num - 1):
if i == t - 1:
return False
else:
snake_case_ : str = i + 1
snake_case_ : int = (v**2) % num
return True
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : int ):
"""simple docstring"""
if num < 2:
return False
snake_case_ : Dict = [
2,
3,
5,
7,
1_1,
1_3,
1_7,
1_9,
2_3,
2_9,
3_1,
3_7,
4_1,
4_3,
4_7,
5_3,
5_9,
6_1,
6_7,
7_1,
7_3,
7_9,
8_3,
8_9,
9_7,
1_0_1,
1_0_3,
1_0_7,
1_0_9,
1_1_3,
1_2_7,
1_3_1,
1_3_7,
1_3_9,
1_4_9,
1_5_1,
1_5_7,
1_6_3,
1_6_7,
1_7_3,
1_7_9,
1_8_1,
1_9_1,
1_9_3,
1_9_7,
1_9_9,
2_1_1,
2_2_3,
2_2_7,
2_2_9,
2_3_3,
2_3_9,
2_4_1,
2_5_1,
2_5_7,
2_6_3,
2_6_9,
2_7_1,
2_7_7,
2_8_1,
2_8_3,
2_9_3,
3_0_7,
3_1_1,
3_1_3,
3_1_7,
3_3_1,
3_3_7,
3_4_7,
3_4_9,
3_5_3,
3_5_9,
3_6_7,
3_7_3,
3_7_9,
3_8_3,
3_8_9,
3_9_7,
4_0_1,
4_0_9,
4_1_9,
4_2_1,
4_3_1,
4_3_3,
4_3_9,
4_4_3,
4_4_9,
4_5_7,
4_6_1,
4_6_3,
4_6_7,
4_7_9,
4_8_7,
4_9_1,
4_9_9,
5_0_3,
5_0_9,
5_2_1,
5_2_3,
5_4_1,
5_4_7,
5_5_7,
5_6_3,
5_6_9,
5_7_1,
5_7_7,
5_8_7,
5_9_3,
5_9_9,
6_0_1,
6_0_7,
6_1_3,
6_1_7,
6_1_9,
6_3_1,
6_4_1,
6_4_3,
6_4_7,
6_5_3,
6_5_9,
6_6_1,
6_7_3,
6_7_7,
6_8_3,
6_9_1,
7_0_1,
7_0_9,
7_1_9,
7_2_7,
7_3_3,
7_3_9,
7_4_3,
7_5_1,
7_5_7,
7_6_1,
7_6_9,
7_7_3,
7_8_7,
7_9_7,
8_0_9,
8_1_1,
8_2_1,
8_2_3,
8_2_7,
8_2_9,
8_3_9,
8_5_3,
8_5_7,
8_5_9,
8_6_3,
8_7_7,
8_8_1,
8_8_3,
8_8_7,
9_0_7,
9_1_1,
9_1_9,
9_2_9,
9_3_7,
9_4_1,
9_4_7,
9_5_3,
9_6_7,
9_7_1,
9_7_7,
9_8_3,
9_9_1,
9_9_7,
]
if num in low_primes:
return True
for prime in low_primes:
if (num % prime) == 0:
return False
return rabin_miller(SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : int = 1_0_2_4 ):
"""simple docstring"""
while True:
snake_case_ : Tuple = random.randrange(2 ** (keysize - 1) , 2 ** (keysize) )
if is_prime_low_num(SCREAMING_SNAKE_CASE__ ):
return num
if __name__ == "__main__":
a_ = generate_large_prime()
print(('''Prime number:''', num))
print(('''is_prime_low_num:''', is_prime_low_num(num)))
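
# Illustrative checks (added as a sketch, not original doctests):
#
#     is_prime_low_num(97)    # True  -- found directly in the low-primes table
#     is_prime_low_num(1013)  # True  -- survives trial division, confirmed by rabin_miller
#     is_prime_low_num(1015)  # False -- 5 * 7 * 29, caught by trial division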
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeq2SeqConfigWithPast
from ...utils import logging


logger = logging.get_logger(__name__)

T5_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "t5-small": "https://huggingface.co/t5-small/resolve/main/config.json",
    "t5-base": "https://huggingface.co/t5-base/resolve/main/config.json",
    "t5-large": "https://huggingface.co/t5-large/resolve/main/config.json",
    "t5-3b": "https://huggingface.co/t5-3b/resolve/main/config.json",
    "t5-11b": "https://huggingface.co/t5-11b/resolve/main/config.json",
}


class T5Config(PretrainedConfig):
    model_type = "t5"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}

    def __init__(
        self,
        vocab_size=32128,
        d_model=512,
        d_kv=64,
        d_ff=2048,
        num_layers=6,
        num_decoder_layers=None,
        num_heads=8,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        feed_forward_proj="relu",
        is_encoder_decoder=True,
        use_cache=True,
        pad_token_id=0,
        eos_token_id=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache

        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"

        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer. "
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'"
            )

        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"

        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            **kwargs,
        )


class T5OnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = {
            "input_ids": {0: "batch", 1: "encoder_sequence"},
            "attention_mask": {0: "batch", 1: "encoder_sequence"},
        }
        if self.use_past:
            common_inputs["attention_mask"][1] = "past_encoder_sequence + sequence"
            common_inputs["decoder_input_ids"] = {0: "batch"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")

        return common_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
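
# A small usage sketch (assumed, mirroring the defaults above): build a
# scaled-down config and read an aliased attribute through `attribute_map`.
#
#     config = T5Config(d_model=256, num_layers=4, num_heads=4)
#     config.hidden_size  # -> 256, resolved to `d_model` via attribute_map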
"""simple docstring"""
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class a ( UpperCAmelCase__ ):
UpperCamelCase : Any = 'Speech2TextFeatureExtractor'
UpperCamelCase : Optional[Any] = 'Speech2TextTokenizer'
def __init__( self : Optional[Any] , lowerCAmelCase : List[str] , lowerCAmelCase : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
super().__init__(lowerCAmelCase , lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Optional[int] =self.feature_extractor
SCREAMING_SNAKE_CASE_: List[Any] =False
def __call__( self : Dict , *lowerCAmelCase : str , **lowerCAmelCase : str ) -> str:
'''simple docstring'''
if self._in_target_context_manager:
return self.current_processor(*lowerCAmelCase , **lowerCAmelCase )
if "raw_speech" in kwargs:
warnings.warn("""Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.""" )
SCREAMING_SNAKE_CASE_: Tuple =kwargs.pop("""raw_speech""" )
else:
SCREAMING_SNAKE_CASE_: int =kwargs.pop("""audio""" , lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Optional[int] =kwargs.pop("""sampling_rate""" , lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Tuple =kwargs.pop("""text""" , lowerCAmelCase )
if len(lowerCAmelCase ) > 0:
SCREAMING_SNAKE_CASE_: List[str] =args[0]
SCREAMING_SNAKE_CASE_: List[str] =args[1:]
if audio is None and text is None:
raise ValueError("""You need to specify either an `audio` or `text` input to process.""" )
if audio is not None:
SCREAMING_SNAKE_CASE_: Optional[int] =self.feature_extractor(lowerCAmelCase , *lowerCAmelCase , sampling_rate=lowerCAmelCase , **lowerCAmelCase )
if text is not None:
SCREAMING_SNAKE_CASE_: Union[str, Any] =self.tokenizer(lowerCAmelCase , **lowerCAmelCase )
if text is None:
return inputs
elif audio is None:
return encodings
else:
SCREAMING_SNAKE_CASE_: Any =encodings["""input_ids"""]
return inputs
def lowerCamelCase__ ( self : Any , *lowerCAmelCase : Any , **lowerCAmelCase : Tuple ) -> Any:
'''simple docstring'''
return self.tokenizer.batch_decode(*lowerCAmelCase , **lowerCAmelCase )
def lowerCamelCase__ ( self : Tuple , *lowerCAmelCase : Any , **lowerCAmelCase : int ) -> Any:
'''simple docstring'''
return self.tokenizer.decode(*lowerCAmelCase , **lowerCAmelCase )
@contextmanager
def lowerCamelCase__ ( self : Tuple ) -> Tuple:
'''simple docstring'''
warnings.warn(
"""`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your """
"""labels by using the argument `text` of the regular `__call__` method (either in the same call as """
"""your audio inputs, or in a separate call.""" )
SCREAMING_SNAKE_CASE_: Optional[Any] =True
SCREAMING_SNAKE_CASE_: Dict =self.tokenizer
yield
SCREAMING_SNAKE_CASE_: int =self.feature_extractor
SCREAMING_SNAKE_CASE_: str =False
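
# Usage sketch (hypothetical objects, not part of this module): a single call
# routes `audio` to the feature extractor and `text` to the tokenizer, so the
# returned batch carries both model inputs and `labels`.
#
#     processor = Speech2TextProcessor(feature_extractor, tokenizer)
#     batch = processor(audio=waveform, sampling_rate=16000, text=transcript)
#     batch["input_features"], batch["labels"]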
import argparse
import os

# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed

from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size


########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
#   - single CPU or single GPU
#   - multi GPUS (using PyTorch distributed mode)
#   - (multi) TPUs
#   - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################


MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader


# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    get_dataloaders = mocked_dataloaders  # noqa: F811


def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    # New Code #
    # We now can define an inner training loop function. It should take a batch size as the only parameter,
    # and build the dataloaders in there.
    # It also gets our decorator
    @find_executable_batch_size(starting_batch_size=batch_size)
    def inner_training_loop(batch_size):
        # And now just move everything below under this function
        # We need to bring in the Accelerator object from earlier
        nonlocal accelerator
        # And reset all of its attributes that could hold onto any memory:
        accelerator.free_memory()

        # Then we can declare the model, optimizer, and everything else:
        set_seed(seed)

        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device)

        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr)
        train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=100,
            num_training_steps=(len(train_dataloader) * num_epochs),
        )

        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
        )

        # Now we train the model
        for epoch in range(num_epochs):
            model.train()
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                outputs = model(**batch)
                loss = outputs.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

            model.eval()
            for step, batch in enumerate(eval_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                with torch.no_grad():
                    outputs = model(**batch)
                predictions = outputs.logits.argmax(dim=-1)
                predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
                metric.add_batch(
                    predictions=predictions,
                    references=references,
                )

            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f"epoch {epoch}:", eval_metric)

    # New Code #
    # And call it at the end with no arguments
    # Note: You could also refactor this outside of your training loop function
    inner_training_loop()


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
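
# Stripped of the GLUE boilerplate, the retry-on-OOM pattern above reduces to
# this minimal sketch (`train` and the starting size are illustrative):
#
#     from accelerate.utils import find_executable_batch_size
#
#     @find_executable_batch_size(starting_batch_size=128)
#     def train(batch_size):
#         ...  # build dataloaders and train with `batch_size`
#
#     train()  # call with no arguments; the decorator halves batch_size on CUDA OOM and retries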
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


_import_structure = {
    "configuration_maskformer": ["MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "MaskFormerConfig"],
    "configuration_maskformer_swin": ["MaskFormerSwinConfig"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_maskformer"] = ["MaskFormerFeatureExtractor"]
    _import_structure["image_processing_maskformer"] = ["MaskFormerImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_maskformer"] = [
        "MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MaskFormerForInstanceSegmentation",
        "MaskFormerModel",
        "MaskFormerPreTrainedModel",
    ]
    _import_structure["modeling_maskformer_swin"] = [
        "MaskFormerSwinBackbone",
        "MaskFormerSwinModel",
        "MaskFormerSwinPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_maskformer import MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskFormerConfig
    from .configuration_maskformer_swin import MaskFormerSwinConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_maskformer import MaskFormerFeatureExtractor
        from .image_processing_maskformer import MaskFormerImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_maskformer import (
            MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            MaskFormerForInstanceSegmentation,
            MaskFormerModel,
            MaskFormerPreTrainedModel,
        )
        from .modeling_maskformer_swin import (
            MaskFormerSwinBackbone,
            MaskFormerSwinModel,
            MaskFormerSwinPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
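
# With the `_LazyModule` indirection above, importing the package stays cheap:
# submodules are only materialized on first attribute access. A usage sketch
# (assuming the usual top-level re-exports):
#
#     from transformers import MaskFormerConfig  # config only, no torch-gated code
#     from transformers import MaskFormerModel   # triggers the torch-dependent import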
from typing import Dict, Iterable, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
    IMAGENET_STANDARD_MEAN,
    IMAGENET_STANDARD_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends


if is_vision_available():
    import PIL

# soft dependency
if is_pytesseract_available():
    import pytesseract

logger = logging.get_logger(__name__)


def normalize_box(box, width, height):
    return [
        int(1000 * (box[0] / width)),
        int(1000 * (box[1] / height)),
        int(1000 * (box[2] / width)),
        int(1000 * (box[3] / height)),
    ]


def apply_tesseract(image: np.ndarray, lang: Optional[str], tesseract_config: Optional[str]):
    """Applies Tesseract OCR on a document image and returns recognized words + normalized bounding boxes."""
    # apply OCR
    pil_image = to_pil_image(image)
    image_width, image_height = pil_image.size
    data = pytesseract.image_to_data(pil_image, lang=lang, output_type="dict", config=tesseract_config)
    words, left, top, width, height = data["text"], data["left"], data["top"], data["width"], data["height"]

    # filter empty words and corresponding coordinates
    irrelevant_indices = [idx for idx, word in enumerate(words) if not word.strip()]
    words = [word for idx, word in enumerate(words) if idx not in irrelevant_indices]
    left = [coord for idx, coord in enumerate(left) if idx not in irrelevant_indices]
    top = [coord for idx, coord in enumerate(top) if idx not in irrelevant_indices]
    width = [coord for idx, coord in enumerate(width) if idx not in irrelevant_indices]
    height = [coord for idx, coord in enumerate(height) if idx not in irrelevant_indices]

    # turn coordinates into (left, top, left+width, top+height) format
    actual_boxes = []
    for x, y, w, h in zip(left, top, width, height):
        actual_box = [x, y, x + w, y + h]
        actual_boxes.append(actual_box)

    # finally, normalize the bounding boxes
    normalized_boxes = []
    for box in actual_boxes:
        normalized_boxes.append(normalize_box(box, image_width, image_height))

    assert len(words) == len(normalized_boxes), "Not as many words as there are bounding boxes"

    return words, normalized_boxes


class LayoutLMv3ImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        rescale_value: float = 1 / 255,
        do_normalize: bool = True,
        image_mean: Union[float, Iterable[float]] = None,
        image_std: Union[float, Iterable[float]] = None,
        apply_ocr: bool = True,
        ocr_lang: Optional[str] = None,
        tesseract_config: Optional[str] = "",
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 224, "width": 224}
        size = get_size_dict(size)

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_value
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
        self.apply_ocr = apply_ocr
        self.ocr_lang = ocr_lang
        self.tesseract_config = tesseract_config

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
        output_size = (size["height"], size["width"])
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, Iterable[float]],
        std: Union[float, Iterable[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample=None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Union[float, Iterable[float]] = None,
        image_std: Union[float, Iterable[float]] = None,
        apply_ocr: bool = None,
        ocr_lang: Optional[str] = None,
        tesseract_config: Optional[str] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        apply_ocr = apply_ocr if apply_ocr is not None else self.apply_ocr
        ocr_lang = ocr_lang if ocr_lang is not None else self.ocr_lang
        tesseract_config = tesseract_config if tesseract_config is not None else self.tesseract_config

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("If do_normalize is True, image_mean and image_std must be specified.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        # Tesseract OCR to get words + normalized bounding boxes
        if apply_ocr:
            requires_backends(self, "pytesseract")
            words_batch = []
            boxes_batch = []
            for image in images:
                words, boxes = apply_tesseract(image, ocr_lang, tesseract_config)
                words_batch.append(words)
                boxes_batch.append(boxes)

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors)

        if apply_ocr:
            data["words"] = words_batch
            data["boxes"] = boxes_batch

        return data
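
# Usage sketch (hypothetical image path; `apply_ocr=True` needs the
# `pytesseract` backend and a Tesseract install):
#
#     from PIL import Image
#
#     processor = LayoutLMv3ImageProcessor()  # apply_ocr defaults to True
#     encoding = processor(Image.open("document.png"), return_tensors="pt")
#     encoding["pixel_values"], encoding["words"], encoding["boxes"]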
import argparse
from pathlib import Path

import torch
from packaging import version
from torch.onnx import export

from diffusers import AutoencoderKL


is_torch_less_than_1_11 = version.parse(version.parse(torch.__version__).base_version) < version.parse("1.11")


def onnx_export(
    model,
    model_args: tuple,
    output_path: Path,
    ordered_input_names,
    output_names,
    dynamic_axes,
    opset,
    use_external_data_format=False,
):
    output_path.parent.mkdir(parents=True, exist_ok=True)
    # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
    # so we check the torch version for backwards compatibility
    if is_torch_less_than_1_11:
        export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            use_external_data_format=use_external_data_format,
            enable_onnx_checker=True,
            opset_version=opset,
        )
    else:
        export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            opset_version=opset,
        )


@torch.no_grad()
def convert_models(model_path: str, output_path: str, opset: int, fp16: bool = False):
    dtype = torch.float16 if fp16 else torch.float32
    if fp16 and torch.cuda.is_available():
        device = "cuda"
    elif fp16 and not torch.cuda.is_available():
        raise ValueError("`float16` model export is only supported on GPUs with CUDA")
    else:
        device = "cpu"
    output_path = Path(output_path)

    # VAE DECODER
    vae_decoder = AutoencoderKL.from_pretrained(model_path + "/vae")
    vae_latent_channels = vae_decoder.config.latent_channels
    # forward only through the decoder part
    vae_decoder.forward = vae_decoder.decode
    onnx_export(
        vae_decoder,
        model_args=(
            torch.randn(1, vae_latent_channels, 25, 25).to(device=device, dtype=dtype),
            False,
        ),
        output_path=output_path / "vae_decoder" / "model.onnx",
        ordered_input_names=["latent_sample", "return_dict"],
        output_names=["sample"],
        dynamic_axes={
            "latent_sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
        },
        opset=opset,
    )
    del vae_decoder


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_path",
        type=str,
        required=True,
        help="Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).",
    )
    parser.add_argument("--output_path", type=str, required=True, help="Path to the output model.")
    parser.add_argument(
        "--opset",
        default=14,
        type=int,
        help="The version of the ONNX operator set to use.",
    )
    parser.add_argument("--fp16", action="store_true", default=False, help="Export the models in `float16` mode")

    args = parser.parse_args()
    print(args.output_path)
    convert_models(args.model_path, args.output_path, args.opset, args.fp16)
    print("SD: Done: ONNX")
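
# Example invocation (illustrative script name and paths):
#
#     python convert_vae_to_onnx.py \
#         --model_path runwayml/stable-diffusion-v1-5 \
#         --output_path ./sd_onnx \
#         --opset 14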
from __future__ import annotations


def fractional_knapsack(value: list[int], weight: list[int], capacity: int) -> tuple[float, list[float]]:
    """Greedy fractional knapsack: sort items by value/weight ratio, take whole
    items while they fit, then a fraction of the first item that does not."""
    index = list(range(len(value)))
    ratio = [v / w for v, w in zip(value, weight)]
    index.sort(key=lambda i: ratio[i], reverse=True)

    max_value: float = 0
    fractions: list[float] = [0] * len(value)
    for i in index:
        if weight[i] <= capacity:
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break

    return max_value, fractions
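
# Worked example (a sketch, not an original doctest): with values [60, 100, 120],
# weights [10, 20, 30] and capacity 50, items 0 and 1 fit whole and 2/3 of item 2
# fills the remainder:
#
#     fractional_knapsack([60, 100, 120], [10, 20, 30], 50)
#     # -> (240.0, [1, 1, 0.6666666666666666])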
if __name__ == "__main__":
import doctest
doctest.testmod()
import unittest

import numpy as np

from transformers import is_flax_available
from transformers.testing_utils import require_flax

from ..test_modeling_flax_common import ids_tensor


if is_flax_available():
    import jax
    import jax.numpy as jnp

    from transformers.generation import (
        FlaxForcedBOSTokenLogitsProcessor,
        FlaxForcedEOSTokenLogitsProcessor,
        FlaxLogitsProcessorList,
        FlaxMinLengthLogitsProcessor,
        FlaxTemperatureLogitsWarper,
        FlaxTopKLogitsWarper,
        FlaxTopPLogitsWarper,
    )


@require_flax
class LogitsProcessorTest(unittest.TestCase):
    def _get_uniform_logits(self, batch_size: int, length: int):
        scores = jnp.ones((batch_size, length)) / length
        return scores

    def test_temperature_dist_warper(self):
        input_ids = None
        length = 20

        scores = self._get_uniform_logits(batch_size=2, length=length)

        # tweak scores to not be uniform anymore
        scores = scores.at[1, 5].set((1 / length) + 0.1)  # peak, 1st batch
        scores = scores.at[1, 10].set((1 / length) - 0.4)  # valley, 1st batch

        # compute softmax
        probs = jax.nn.softmax(scores, axis=-1)

        temp_dist_warper_sharper = FlaxTemperatureLogitsWarper(temperature=0.5)
        temp_dist_warper_smoother = FlaxTemperatureLogitsWarper(temperature=1.3)

        warped_prob_sharp = jax.nn.softmax(temp_dist_warper_sharper(input_ids, scores.copy(), cur_len=None), axis=-1)
        warped_prob_smooth = jax.nn.softmax(temp_dist_warper_smoother(input_ids, scores.copy(), cur_len=None), axis=-1)

        # uniform distribution stays uniform
        self.assertTrue(jnp.allclose(probs[0, :], warped_prob_sharp[0, :], atol=1e-3))
        self.assertTrue(jnp.allclose(probs[0, :], warped_prob_smooth[0, :], atol=1e-3))

        # sharp peaks get higher, valleys get lower
        self.assertLess(probs[1, :].max(), warped_prob_sharp[1, :].max())
        self.assertGreater(probs[1, :].min(), warped_prob_sharp[1, :].min())

        # smooth peaks get lower, valleys get higher
        self.assertGreater(probs[1, :].max(), warped_prob_smooth[1, :].max())
        self.assertLess(probs[1, :].min(), warped_prob_smooth[1, :].min())

    def test_top_k_dist_warper(self):
        input_ids = None
        vocab_size = 10
        batch_size = 2

        # create ramp distribution
        ramp_logits = np.broadcast_to(np.arange(vocab_size)[None, :], (batch_size, vocab_size)).copy()
        ramp_logits[1:, : vocab_size // 2] = ramp_logits[1:, : vocab_size // 2] + vocab_size

        top_k_warp = FlaxTopKLogitsWarper(3)

        scores = top_k_warp(input_ids, ramp_logits, cur_len=None)

        # check that correct tokens are filtered
        self.assertListEqual(jnp.isinf(scores[0]).tolist(), 7 * [True] + 3 * [False])
        self.assertListEqual(jnp.isinf(scores[1]).tolist(), 2 * [True] + 3 * [False] + 5 * [True])

        # check special case
        length = 5
        top_k_warp_safety_check = FlaxTopKLogitsWarper(top_k=1, filter_value=0.0, min_tokens_to_keep=3)

        ramp_logits = np.broadcast_to(np.arange(length)[None, :], (batch_size, length)).copy()
        scores = top_k_warp_safety_check(input_ids, ramp_logits, cur_len=None)

        # min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
        self.assertListEqual((scores == 0.0).sum(axis=-1).tolist(), [2, 2])

    def test_top_p_dist_warper(self):
        input_ids = None
        vocab_size = 10
        batch_size = 2

        # create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
        dist = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]]))

        top_p_warp = FlaxTopPLogitsWarper(0.8)
        filtered_dist = np.exp(top_p_warp(input_ids, dist, cur_len=None))

        # dist should be filtered to keep min num values so that sum is >= top_p
        # exp (-inf) => 0
        EXPECTED_FILTERED_DIST = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]])
        self.assertTrue(np.allclose(filtered_dist, EXPECTED_FILTERED_DIST, atol=1e-3))

        # check edge cases with negative and extreme logits
        ramp_logits = np.broadcast_to(np.arange(vocab_size)[None, :], (batch_size, vocab_size)).copy() - (
            vocab_size // 2
        )

        # make ramp_logits more extreme
        ramp_logits[1] = ramp_logits[1] * 100.0

        # make sure at least 2 tokens are kept
        top_p_warp = FlaxTopPLogitsWarper(0.9, min_tokens_to_keep=2, filter_value=0.0)
        filtered_dist = top_p_warp(input_ids, ramp_logits, cur_len=None)

        # first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
        self.assertListEqual((filtered_dist != 0.0).sum(axis=-1).tolist(), [3, 2])

    def test_min_length_dist_processor(self):
        vocab_size = 20
        batch_size = 4
        eos_token_id = 0

        min_dist_processor = FlaxMinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id)

        # check that min length is applied at length 5
        input_ids = ids_tensor((batch_size, 20), vocab_size=20)
        cur_len = 5
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores_before_min_length = min_dist_processor(input_ids, scores, cur_len=cur_len)
        self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist(), 4 * [-float("inf")])

        # check that min length is not applied anymore at length 15
        scores = self._get_uniform_logits(batch_size, vocab_size)
        cur_len = 15
        scores_before_min_length = min_dist_processor(input_ids, scores, cur_len=cur_len)
        self.assertFalse(jnp.isinf(scores_before_min_length).any())

    def test_forced_bos_token_logits_processor(self):
        vocab_size = 20
        batch_size = 4
        bos_token_id = 0

        logits_processor = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id)

        # check that all scores are -inf except the bos_token_id score
        input_ids = ids_tensor((batch_size, 1), vocab_size=20)
        cur_len = 1
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores, cur_len=cur_len)
        self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :]).all())
        self.assertListEqual(scores[:, bos_token_id].tolist(), 4 * [0])  # score for bos_token_id should be zero

        # check that bos_token_id is not forced if current length is greater than 1
        cur_len = 3
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores, cur_len=cur_len)
        self.assertFalse(jnp.isinf(scores).any())

    def test_forced_eos_token_logits_processor(self):
        vocab_size = 20
        batch_size = 4
        eos_token_id = 0
        max_length = 5

        logits_processor = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id)

        # check that all scores are -inf except the eos_token_id when max_length is reached
        input_ids = ids_tensor((batch_size, 4), vocab_size=20)
        cur_len = 4
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores, cur_len=cur_len)
        self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :]).all())
        self.assertListEqual(scores[:, eos_token_id].tolist(), 4 * [0])  # score for eos_token_id should be zero

        # check that eos_token_id is not forced if max_length is not reached
        cur_len = 3
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores, cur_len=cur_len)
        self.assertFalse(jnp.isinf(scores).any())

    def test_processor_list(self):
        batch_size = 4
        sequence_length = 10
        vocab_size = 15
        eos_token_id = 2
        bos_token_id = 1
        max_length = 15

        # dummy input_ids and scores
        input_ids = ids_tensor((batch_size, sequence_length), vocab_size)
        input_ids_comp = input_ids.copy()

        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores_comp = scores.copy()

        # instantiate all dist processors
        temp_dist_warp = FlaxTemperatureLogitsWarper(temperature=0.5)
        top_k_warp = FlaxTopKLogitsWarper(3)
        top_p_warp = FlaxTopPLogitsWarper(0.8)

        # instantiate all logits processors
        min_dist_proc = FlaxMinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id)
        bos_dist_proc = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id)
        eos_dist_proc = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id)

        cur_len = 10

        # no processor list
        scores = temp_dist_warp(input_ids, scores, cur_len=cur_len)
        scores = top_k_warp(input_ids, scores, cur_len=cur_len)
        scores = top_p_warp(input_ids, scores, cur_len=cur_len)
        scores = min_dist_proc(input_ids, scores, cur_len=cur_len)
        scores = bos_dist_proc(input_ids, scores, cur_len=cur_len)
        scores = eos_dist_proc(input_ids, scores, cur_len=cur_len)

        # with processor list
        processor = FlaxLogitsProcessorList(
            [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc]
        )
        scores_comp = processor(input_ids, scores_comp, cur_len=cur_len)

        # scores should be equal
        self.assertTrue(jnp.allclose(scores, scores_comp, atol=1e-3))

        # input_ids should never be changed
        self.assertListEqual(input_ids.tolist(), input_ids_comp.tolist())

    def test_processor_list_jitted(self):
        batch_size = 4
        sequence_length = 10
        vocab_size = 15
        eos_token_id = 2
        bos_token_id = 1
        max_length = 15

        # dummy input_ids and scores
        input_ids = ids_tensor((batch_size, sequence_length), vocab_size)
        input_ids_comp = input_ids.copy()

        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores_comp = scores.copy()

        # instantiate all dist processors
        temp_dist_warp = FlaxTemperatureLogitsWarper(temperature=0.5)
        top_k_warp = FlaxTopKLogitsWarper(3)
        top_p_warp = FlaxTopPLogitsWarper(0.8)

        # instantiate all logits processors
        min_dist_proc = FlaxMinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id)
        bos_dist_proc = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id)
        eos_dist_proc = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id)

        cur_len = 10

        # no processor list
        def run_no_processor_list(input_ids, scores, cur_len):
            scores = temp_dist_warp(input_ids, scores, cur_len=cur_len)
            scores = top_k_warp(input_ids, scores, cur_len=cur_len)
            scores = top_p_warp(input_ids, scores, cur_len=cur_len)
            scores = min_dist_proc(input_ids, scores, cur_len=cur_len)
            scores = bos_dist_proc(input_ids, scores, cur_len=cur_len)
            scores = eos_dist_proc(input_ids, scores, cur_len=cur_len)
            return scores

        # with processor list
        def run_processor_list(input_ids, scores, cur_len):
            processor = FlaxLogitsProcessorList(
                [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc]
            )
            scores = processor(input_ids, scores, cur_len=cur_len)
            return scores

        jitted_run_no_processor_list = jax.jit(run_no_processor_list)
        jitted_run_processor_list = jax.jit(run_processor_list)

        scores = jitted_run_no_processor_list(input_ids, scores, cur_len)
        scores_comp = jitted_run_processor_list(input_ids, scores_comp, cur_len)

        # scores should be equal
        self.assertTrue(jnp.allclose(scores, scores_comp, atol=1e-3))

        # input_ids should never be changed
        self.assertListEqual(input_ids.tolist(), input_ids_comp.tolist())
"""simple docstring"""
def a ( __UpperCAmelCase : int = 1_0_0_0 ) -> int:
__magic_name__: int = 2**power
__magic_name__: Any = 0
while n:
__magic_name__, __magic_name__: int = r + n % 1_0, n // 1_0
return r
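
# Quick sanity check (added as an illustration): 2**15 = 32768 and
# 3 + 2 + 7 + 6 + 8 = 26.
assert solution(15) == 26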
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
"""simple docstring"""
def a ( __UpperCAmelCase : List[Any] ) -> str:
__magic_name__: Optional[int] = [0] * len(__UpperCAmelCase )
__magic_name__: str = []
__magic_name__: Any = []
__magic_name__: Union[str, Any] = 0
for values in graph.values():
for i in values:
indegree[i] += 1
for i in range(len(__UpperCAmelCase ) ):
if indegree[i] == 0:
queue.append(__UpperCAmelCase )
while queue:
__magic_name__: Optional[Any] = queue.pop(0 )
cnt += 1
topo.append(__UpperCAmelCase )
for x in graph[vertex]:
indegree[x] -= 1
if indegree[x] == 0:
queue.append(__UpperCAmelCase )
if cnt != len(__UpperCAmelCase ):
print("""Cycle exists""" )
else:
print(__UpperCAmelCase )
# Adjacency List of Graph
__lowerCamelCase = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
topological_sort(graph)
from __future__ import annotations


def depth_first_search(graph: dict, start: str) -> set[str]:
    """Iterative depth-first search; returns the set of explored vertices."""
    explored, stack = set(start), [start]

    while stack:
        v = stack.pop()
        explored.add(v)
        # Differences from BFS:
        # 1) pop last element instead of first one
        # 2) add adjacent elements to stack without exploring them
        for adj in reversed(graph[v]):
            if adj not in explored:
                stack.append(adj)
    return explored


G = {
    "A": ["B", "C", "D"],
    "B": ["A", "D", "E"],
    "C": ["A", "F"],
    "D": ["B", "D"],
    "E": ["B", "F"],
    "F": ["C", "E", "G"],
    "G": ["F"],
}

if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(depth_first_search(G, "A"))
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class EfficientFormerImageProcessorTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        image_size=224,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
        }
@require_torch
@require_vision
class EfficientFormerImageProcessorTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ViTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_proc_tester = EfficientFormerImageProcessorTester(self)

    @property
    def image_processor_dict(self):
        return self.image_proc_tester.prepare_image_processor_dict()

    def test_image_proc_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "image_mean"))
        self.assertTrue(hasattr(image_processor, "image_std"))
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_resize"))
        self.assertTrue(hasattr(image_processor, "size"))

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )
| 520
| 0
|
'''simple docstring'''
import torch
from diffusers import DiffusionPipeline
class OneStepUNetPipeline(DiffusionPipeline):
    """Minimal pipeline that runs a single UNet forward pass and scheduler step."""

    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    def __call__(self):
        noise = torch.randn(
            (1, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
        )
        timestep = 1
        model_output = self.unet(noise, timestep).sample
        scheduler_output = self.scheduler.step(model_output, timestep, noise).prev_sample
        # scheduler_output - scheduler_output is a zero tensor, so the result is all ones
        result = scheduler_output - scheduler_output + torch.ones_like(scheduler_output)
        return result
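# Minimal usage sketch (assumes a UNet2DModel `unet` and a compatible scheduler exist;
# the class name above was chosen for clarity, the original name is not recoverable):
#   pipe = OneStepUNetPipeline(unet=unet, scheduler=scheduler)
#   out = pipe()  # tensor of ones with the sample's shape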
| 98
|
import unittest
from diffusers.pipelines.pipeline_utils import is_safetensors_compatible
class IsSafetensorsCompatibleTests(unittest.TestCase):
    def test_all_is_compatible(self):
        filenames = [
            'safety_checker/pytorch_model.bin',
            'safety_checker/model.safetensors',
            'vae/diffusion_pytorch_model.bin',
            'vae/diffusion_pytorch_model.safetensors',
            'text_encoder/pytorch_model.bin',
            'text_encoder/model.safetensors',
            'unet/diffusion_pytorch_model.bin',
            'unet/diffusion_pytorch_model.safetensors',
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_diffusers_model_is_compatible(self):
        filenames = [
            'unet/diffusion_pytorch_model.bin',
            'unet/diffusion_pytorch_model.safetensors',
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_diffusers_model_is_not_compatible(self):
        filenames = [
            'safety_checker/pytorch_model.bin',
            'safety_checker/model.safetensors',
            'vae/diffusion_pytorch_model.bin',
            'vae/diffusion_pytorch_model.safetensors',
            'text_encoder/pytorch_model.bin',
            'text_encoder/model.safetensors',
            'unet/diffusion_pytorch_model.bin',
            # Removed: 'unet/diffusion_pytorch_model.safetensors',
        ]
        self.assertFalse(is_safetensors_compatible(filenames))

    def test_transformer_model_is_compatible(self):
        filenames = [
            'text_encoder/pytorch_model.bin',
            'text_encoder/model.safetensors',
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_transformer_model_is_not_compatible(self):
        filenames = [
            'safety_checker/pytorch_model.bin',
            'safety_checker/model.safetensors',
            'vae/diffusion_pytorch_model.bin',
            'vae/diffusion_pytorch_model.safetensors',
            'text_encoder/pytorch_model.bin',
            # Removed: 'text_encoder/model.safetensors',
            'unet/diffusion_pytorch_model.bin',
            'unet/diffusion_pytorch_model.safetensors',
        ]
        self.assertFalse(is_safetensors_compatible(filenames))

    def test_all_is_compatible_variant(self):
        filenames = [
            'safety_checker/pytorch_model.fp16.bin',
            'safety_checker/model.fp16.safetensors',
            'vae/diffusion_pytorch_model.fp16.bin',
            'vae/diffusion_pytorch_model.fp16.safetensors',
            'text_encoder/pytorch_model.fp16.bin',
            'text_encoder/model.fp16.safetensors',
            'unet/diffusion_pytorch_model.fp16.bin',
            'unet/diffusion_pytorch_model.fp16.safetensors',
        ]
        variant = 'fp16'
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_compatible_variant(self):
        filenames = [
            'unet/diffusion_pytorch_model.fp16.bin',
            'unet/diffusion_pytorch_model.fp16.safetensors',
        ]
        variant = 'fp16'
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_compatible_variant_partial(self):
        # pass variant but use the non-variant filenames
        filenames = [
            'unet/diffusion_pytorch_model.bin',
            'unet/diffusion_pytorch_model.safetensors',
        ]
        variant = 'fp16'
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_not_compatible_variant(self):
        filenames = [
            'safety_checker/pytorch_model.fp16.bin',
            'safety_checker/model.fp16.safetensors',
            'vae/diffusion_pytorch_model.fp16.bin',
            'vae/diffusion_pytorch_model.fp16.safetensors',
            'text_encoder/pytorch_model.fp16.bin',
            'text_encoder/model.fp16.safetensors',
            'unet/diffusion_pytorch_model.fp16.bin',
            # Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
        ]
        variant = 'fp16'
        self.assertFalse(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_compatible_variant(self):
        filenames = [
            'text_encoder/pytorch_model.fp16.bin',
            'text_encoder/model.fp16.safetensors',
        ]
        variant = 'fp16'
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_compatible_variant_partial(self):
        # pass variant but use the non-variant filenames
        filenames = [
            'text_encoder/pytorch_model.bin',
            'text_encoder/model.safetensors',
        ]
        variant = 'fp16'
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_not_compatible_variant(self):
        filenames = [
            'safety_checker/pytorch_model.fp16.bin',
            'safety_checker/model.fp16.safetensors',
            'vae/diffusion_pytorch_model.fp16.bin',
            'vae/diffusion_pytorch_model.fp16.safetensors',
            'text_encoder/pytorch_model.fp16.bin',
            # 'text_encoder/model.fp16.safetensors',
            'unet/diffusion_pytorch_model.fp16.bin',
            'unet/diffusion_pytorch_model.fp16.safetensors',
        ]
        variant = 'fp16'
        self.assertFalse(is_safetensors_compatible(filenames, variant=variant))
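# As the cases above illustrate: every PyTorch `.bin` weight needs a `.safetensors`
# counterpart for the file list to count as compatible, and when a variant such as
# `fp16` is requested, non-variant filenames are accepted as a fallback.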
| 53
| 0
|
"""simple docstring"""
from timeit import timeit
def get_set_bits_count_using_brian_kernighans_algorithm(number: int) -> int:
    """Count set bits by repeatedly clearing the lowest set bit."""
    if number < 0:
        raise ValueError('the value of input must not be negative')
    result = 0
    while number:
        number &= number - 1
        result += 1
    return result


def get_set_bits_count_using_modulo_operator(number: int) -> int:
    """Count set bits by checking the last bit and shifting right."""
    if number < 0:
        raise ValueError('the value of input must not be negative')
    result = 0
    while number:
        if number % 2 == 1:
            result += 1
        number >>= 1
    return result


def benchmark() -> None:
    """Benchmark both implementations with timeit."""

    def do_benchmark(number: int) -> None:
        setup = 'import __main__ as z'
        print(f"Benchmark when {number = }:")
        print(f"{get_set_bits_count_using_modulo_operator(number) = }")
        timing = timeit(f"z.get_set_bits_count_using_modulo_operator({number})", setup=setup)
        print(f"timeit() runs in {timing} seconds")
        print(f"{get_set_bits_count_using_brian_kernighans_algorithm(number) = }")
        timing = timeit(
            f"z.get_set_bits_count_using_brian_kernighans_algorithm({number})",
            setup=setup,
        )
        print(f"timeit() runs in {timing} seconds")

    for number in (25, 37, 58, 0):
        do_benchmark(number)
        print()
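# Example: 25 == 0b11001 has three set bits, so both functions return 3.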
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 317
|
"""simple docstring"""
from __future__ import annotations
from collections.abc import Callable
lowercase__ : Union[str, Any] = list[list[float | int]]
def solve(matrix: Matrix, vector: Matrix) -> Matrix:
    """Solve the linear system matrix @ x = vector via Gaussian elimination with partial pivoting."""
    size: int = len(matrix)
    augmented: Matrix = [[0 for _ in range(size + 1)] for _ in range(size)]
    row: int
    rowa: int
    col: int
    cola: int
    pivot_row: int
    ratio: float

    for row in range(size):
        for col in range(size):
            augmented[row][col] = matrix[row][col]
        augmented[row][size] = vector[row][0]

    row = 0
    col = 0
    while row < size and col < size:
        # pivoting
        pivot_row = max((abs(augmented[rowa][col]), rowa) for rowa in range(row, size))[1]
        if augmented[pivot_row][col] == 0:
            col += 1
            continue
        else:
            augmented[row], augmented[pivot_row] = augmented[pivot_row], augmented[row]

        for rowa in range(row + 1, size):
            ratio = augmented[rowa][col] / augmented[row][col]
            augmented[rowa][col] = 0
            for cola in range(col + 1, size + 1):
                augmented[rowa][cola] -= augmented[row][cola] * ratio

        row += 1
        col += 1

    # back substitution
    for col in range(1, size):
        for row in range(col):
            ratio = augmented[row][col] / augmented[col][col]
            for cola in range(col, size + 1):
                augmented[row][cola] -= augmented[col][cola] * ratio

    # round to get rid of numbers like 2.000000000000004
    return [
        [round(augmented[row][size] / augmented[row][row], 10)] for row in range(size)
    ]
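# Example: solve([[1, 0], [0, 1]], [[5], [7]]) == [[5.0], [7.0]].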
def interpolate(y_list: list[int]) -> Callable[[int], int]:
    """Return the polynomial of minimal degree through the points (1, y_1), ..., (n, y_n)."""
    size: int = len(y_list)
    matrix: Matrix = [[0 for _ in range(size)] for _ in range(size)]
    vector: Matrix = [[0] for _ in range(size)]
    coeffs: Matrix
    x_val: int
    y_val: int
    col: int

    for x_val, y_val in enumerate(y_list):
        for col in range(size):
            matrix[x_val][col] = (x_val + 1) ** (size - col - 1)
        vector[x_val][0] = y_val

    coeffs = solve(matrix, vector)

    def interpolated_func(var: int) -> int:
        return sum(
            round(coeffs[x_val][0]) * (var ** (size - x_val - 1))
            for x_val in range(size)
        )

    return interpolated_func
def question_function(variable: int) -> int:
    """The generating function u(n) = 1 - n + n**2 - ... + n**10 from Project Euler 101."""
return (
1
- variable
+ variable**2
- variable**3
+ variable**4
- variable**5
+ variable**6
- variable**7
+ variable**8
- variable**9
+ variable**10
)
def solution(func: Callable[[int], int] = question_function, order: int = 10) -> int:
    """Sum the first incorrect terms of the optimal polynomial approximations."""
    data_points: list[int] = [func(x_val) for x_val in range(1, order + 1)]
    polynomials: list[Callable[[int], int]] = [
        interpolate(data_points[:max_coeff]) for max_coeff in range(1, order + 1)
    ]
    ret: int = 0
    poly: Callable[[int], int]
    x_val: int
    for poly in polynomials:
        x_val = 1
        while func(x_val) == poly(x_val):
            x_val += 1
        ret += poly(x_val)
    return ret
if __name__ == "__main__":
print(f'{solution() = }')
| 317
| 1
|
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging
logger = logging.get_logger(__name__)

GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'EleutherAI/gpt-neo-1.3B': 'https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json',
    # See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}
class GPTNeoConfig(PretrainedConfig):
    model_type = 'gpt_neo'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {'num_attention_heads': 'num_heads', 'num_hidden_layers': 'num_layers'}

    def __init__(
        self,
        vocab_size=50257,
        max_position_embeddings=2048,
        hidden_size=2048,
        num_layers=24,
        attention_types=[[["global", "local"], 12]],
        num_heads=16,
        intermediate_size=None,
        window_size=256,
        activation_function="gelu_new",
        resid_dropout=0.0,
        embed_dropout=0.0,
        attention_dropout=0.0,
        classifier_dropout=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.intermediate_size = intermediate_size
        self.window_size = window_size
        self.activation_function = activation_function
        self.resid_dropout = resid_dropout
        self.embed_dropout = embed_dropout
        self.attention_dropout = attention_dropout
        self.classifier_dropout = classifier_dropout
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        self.attention_types = attention_types
        self.attention_layers = self.expand_attention_types_params(attention_types)

        if len(self.attention_layers) != self.num_layers:
            raise ValueError(
                'Configuration for convolutional module is incorrect. '
                'It is required that `len(config.attention_layers)` == `config.num_layers` '
                f'but is `len(config.attention_layers) = {len(self.attention_layers)}`, '
                f'`config.num_layers = {self.num_layers}`. '
                '`config.attention_layers` is prepared using `config.attention_types`. '
                'Please verify the value of `config.attention_types` argument.'
            )

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @staticmethod
    def expand_attention_types_params(attention_types):
        attentions = []
        for item in attention_types:
            for _ in range(item[1]):
                attentions.extend(item[0])
        return attentions
def custom_unfold(input, dimension, size, step):
    """Custom torch.Tensor.unfold implementation to enable the export to ONNX."""
    import torch

    shape = input.size()
    rank = len(shape)
    sizedim = shape[dimension]

    low_indices = torch.arange(0, sizedim, step)
    min_length = torch.div(sizedim - size, step, rounding_mode='floor') + 1
    indices = torch.arange(size) + low_indices[:min_length][:, None]

    s = [slice(None)] * rank
    s[dimension] = indices
    sliced = input[s]

    perm = list(range(0, rank + 1))
    perm.append(perm.pop(dimension + 1))

    return sliced.permute(perm)


def custom_get_block_length_and_num_blocks(seq_length, window_size):
    """Return the largest divisor of seq_length below window_size, and the number of blocks."""
    import torch

    candidates = torch.arange(1, window_size)
    remainders = torch.remainder(seq_length, candidates)
    divisor_indices = remainders == 0
    divisors = candidates[divisor_indices]
    largest_divisor = torch.max(divisors)
    return largest_divisor, torch.div(seq_length, largest_divisor, rounding_mode='floor')
class GPTNeoOnnxConfig(OnnxConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({'input_ids': {0: 'batch', 1: 'sequence'}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction='inputs')
            common_inputs['attention_mask'] = {0: 'batch', 1: 'past_sequence + sequence'}
        else:
            common_inputs['attention_mask'] = {0: 'batch', 1: 'sequence'}

        return common_inputs

    @property
    def num_attention_heads(self) -> int:
        return self._config.num_heads

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({'input_ids': common_inputs['input_ids']})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.')
            else:
                import torch

                batch, seqlen = common_inputs['input_ids'].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs['past_key_values'] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs['attention_mask'] = common_inputs['attention_mask']
        if self.use_past:
            mask_dtype = ordered_inputs['attention_mask'].dtype
            ordered_inputs['attention_mask'] = torch.cat(
                [ordered_inputs['attention_mask'], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
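# Minimal sanity-check sketch (assumes transformers is installed):
#   config = GPTNeoConfig(num_layers=4, attention_types=[[["global", "local"], 2]])
#   assert len(config.attention_layers) == config.num_layers  # ["global", "local"] * 2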
| 94
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {'configuration_plbart': ['PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PLBartConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_plbart'] = ['PLBartTokenizer']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_plbart'] = [
'''PLBART_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''PLBartForCausalLM''',
'''PLBartForConditionalGeneration''',
'''PLBartForSequenceClassification''',
'''PLBartModel''',
'''PLBartPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_plbart import PLBartTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_plbart import (
PLBART_PRETRAINED_MODEL_ARCHIVE_LIST,
PLBartForCausalLM,
PLBartForConditionalGeneration,
PLBartForSequenceClassification,
PLBartModel,
PLBartPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 549
| 0
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''facebook/s2t-small-librispeech-asr''': (
'''https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/config.json'''
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech_to_text
}
class Speech2TextConfig(PretrainedConfig):
    model_type = "speech_to_text"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=10000,
        encoder_layers=12,
        encoder_ffn_dim=2048,
        encoder_attention_heads=4,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=4,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=2,
        scale_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        max_source_positions=6000,
        max_target_positions=1024,
        num_conv_layers=2,
        conv_kernel_sizes=(5, 5),
        conv_channels=1024,
        input_feat_per_channel=80,
        input_channels=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions
        self.num_conv_layers = num_conv_layers
        self.conv_kernel_sizes = list(conv_kernel_sizes)
        self.conv_channels = conv_channels
        self.input_feat_per_channel = input_feat_per_channel
        self.input_channels = input_channels

        if len(self.conv_kernel_sizes) != self.num_conv_layers:
            raise ValueError(
                'Configuration for convolutional module is incorrect. '
                'It is required that `len(config.conv_kernel_sizes)` == `config.num_conv_layers` '
                f'but is `len(config.conv_kernel_sizes) = {len(self.conv_kernel_sizes)}`, '
                f'`config.num_conv_layers = {self.num_conv_layers}`.'
            )

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
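# Minimal sanity-check sketch (assumes transformers is installed):
#   config = Speech2TextConfig(num_conv_layers=2, conv_kernel_sizes=(5, 5))
#   assert config.hidden_size == config.d_model  # resolved via attribute_map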
| 237
|
"""simple docstring"""
import argparse
import collections
import os
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_table.py
TRANSFORMERS_PATH = 'src/transformers'
PATH_TO_DOCS = 'docs/source/en'
REPO_PATH = '.'
def _find_text_in_file(filename, start_prompt, end_prompt):
    """Find the text in `filename` between a line beginning with `start_prompt` and before `end_prompt`."""
    with open(filename, 'r', encoding='utf-8', newline='\n') as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt):
        start_index += 1
    start_index += 1

    end_index = start_index
    while not lines[end_index].startswith(end_prompt):
        end_index += 1
    end_index -= 1

    while len(lines[start_index]) <= 1:
        start_index += 1
    while len(lines[end_index]) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index]), start_index, end_index, lines
# Add here suffixes that are used to identify models, separated by |
ALLOWED_MODEL_SUFFIXES = 'Model|Encoder|Decoder|ForConditionalGeneration'
# Regexes that match TF/Flax/PT model names.
_re_tf_models = re.compile(r'TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)')
_re_flax_models = re.compile(r'Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)')
# Will match any TF or Flax model too so need to be in an else branch after the two previous regexes.
_re_pt_models = re.compile(r'(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)')

# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)
def camel_case_split(identifier):
    """Split a camel-cased `identifier` into words."""
    matches = re.finditer('.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)', identifier)
    return [m.group(0) for m in matches]


def _center_text(text, width):
    """Center `text` in a cell of size `width`, counting ✅/❌ as two characters wide."""
    text_length = 2 if text == '✅' or text == '❌' else len(text)
    left_indent = (width - text_length) // 2
    right_indent = width - text_length - left_indent
    return " " * left_indent + text + " " * right_indent
def get_model_table_from_auto_modules():
    """Generate an up-to-date model table from the content of the auto modules."""
    config_maping_names = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
    model_name_to_config = {
        name: config_maping_names[code]
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if code in config_maping_names
    }
    model_name_to_prefix = {name: config.replace('Config', '') for name, config in model_name_to_config.items()}

    # Dictionaries flagging if each model prefix has a slow/fast tokenizer, backend in PT/TF/Flax.
    slow_tokenizers = collections.defaultdict(bool)
    fast_tokenizers = collections.defaultdict(bool)
    pt_models = collections.defaultdict(bool)
    tf_models = collections.defaultdict(bool)
    flax_models = collections.defaultdict(bool)

    # Let's lookup through all transformers object (once).
    for attr_name in dir(transformers_module):
        lookup_dict = None
        if attr_name.endswith('Tokenizer'):
            lookup_dict = slow_tokenizers
            attr_name = attr_name[:-9]
        elif attr_name.endswith('TokenizerFast'):
            lookup_dict = fast_tokenizers
            attr_name = attr_name[:-13]
        elif _re_tf_models.match(attr_name) is not None:
            lookup_dict = tf_models
            attr_name = _re_tf_models.match(attr_name).groups()[0]
        elif _re_flax_models.match(attr_name) is not None:
            lookup_dict = flax_models
            attr_name = _re_flax_models.match(attr_name).groups()[0]
        elif _re_pt_models.match(attr_name) is not None:
            lookup_dict = pt_models
            attr_name = _re_pt_models.match(attr_name).groups()[0]

        if lookup_dict is not None:
            while len(attr_name) > 0:
                if attr_name in model_name_to_prefix.values():
                    lookup_dict[attr_name] = True
                    break
                # Try again after removing the last word in the name
                attr_name = ''.join(camel_case_split(attr_name)[:-1])

    # Let's build that table!
    model_names = list(model_name_to_config.keys())
    model_names.sort(key=str.lower)
    columns = ['Model', 'Tokenizer slow', 'Tokenizer fast', 'PyTorch support', 'TensorFlow support', 'Flax Support']
    # We'll need widths to properly display everything in the center (+2 is to leave one extra space on each side).
    widths = [len(c) + 2 for c in columns]
    widths[0] = max([len(name) for name in model_names]) + 2

    # Build the table per se
    table = '|' + '|'.join([_center_text(c, w) for c, w in zip(columns, widths)]) + '|\n'
    # Use ":-----:" format to center-aligned table cell texts
    table += "|" + "|".join([':' + '-' * (w - 2) + ':' for w in widths]) + "|\n"

    check = {True: '✅', False: '❌'}
    for name in model_names:
        prefix = model_name_to_prefix[name]
        line = [
            name,
            check[slow_tokenizers[prefix]],
            check[fast_tokenizers[prefix]],
            check[pt_models[prefix]],
            check[tf_models[prefix]],
            check[flax_models[prefix]],
        ]
        table += "|" + "|".join([_center_text(l, w) for l, w in zip(line, widths)]) + "|\n"

    return table
def check_model_table(overwrite=False):
    """Check the model table in `index.md` is consistent with the state of the lib, and maybe overwrite it."""
    current_table, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_DOCS, 'index.md'),
        start_prompt='<!--This table is updated automatically from the auto modules',
        end_prompt='<!-- End table-->',
    )
    new_table = get_model_table_from_auto_modules()

    if current_table != new_table:
        if overwrite:
            with open(os.path.join(PATH_TO_DOCS, 'index.md'), 'w', encoding='utf-8', newline='\n') as f:
                f.writelines(lines[:start_index] + [new_table] + lines[end_index:])
        else:
            raise ValueError(
                'The model table in the `index.md` has not been updated. Run `make fix-copies` to fix this.'
            )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
    args = parser.parse_args()

    check_model_table(args.fix_and_overwrite)
| 237
| 1
|
def prime_sieve_eratosthenes(num: int) -> list[int]:
    """Return all primes up to and including `num` using the sieve of Eratosthenes."""
    if num <= 0:
        raise ValueError('Input must be a positive integer')
    primes = [True] * (num + 1)
    p = 2
    while p * p <= num:
        if primes[p]:
            for i in range(p * p, num + 1, p):
                primes[i] = False
        p += 1
    return [prime for prime in range(2, num + 1) if primes[prime]]
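# Example: prime_sieve_eratosthenes(10) == [2, 3, 5, 7].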
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_num = int(input("Enter a positive integer: ").strip())
print(prime_sieve_eratosthenes(user_num))
| 393
|
"""simple docstring"""
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
@require_tokenizers
class XLMRobertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLMRobertaTokenizer
    rust_tokenizer_class = XLMRobertaTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = XLMRobertaTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 1002)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1002)
    def test_full_tokenizer(self):
        tokenizer = XLMRobertaTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
    def test_save_pretrained(self):
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return

        self.tokenizers_list[0] = (self.rust_tokenizer_class, "hf-internal-testing/tiny-xlm-roberta", {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})'):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f)
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=True)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=False)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it saved the tokenizer.json file
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)
    @cached_property
    def big_tokenizer(self):
        return XLMRobertaTokenizer.from_pretrained("xlm-roberta-base")

    def test_picklable_without_disk(self):
        with tempfile.NamedTemporaryFile() as f:
            shutil.copyfile(SAMPLE_VOCAB, f.name)
            tokenizer = XLMRobertaTokenizer(f.name, keep_accents=True)
            pickled_tokenizer = pickle.dumps(tokenizer)
            pickle.loads(pickled_tokenizer)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [0, 35378, 6661, 38, 2]
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')  # xlmr.large has same tokenizer
        # xlmr.eval()
        # xlmr.encode(symbols)
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
            " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
        )
        original_tokenizer_encodings = [
0,
32_93,
83,
10,
45_52,
49_89,
79_86,
6_78,
10,
59_15,
1_11,
17_94_59,
12_48_50,
4,
60_44,
2_37,
12,
6,
5,
6,
4,
67_80,
7_05,
15,
13_88,
44,
3_78,
1_01_14,
7_11,
1_52,
20,
6,
5,
2_23_76,
6_42,
12_21,
1_51_90,
3_41_53,
4_50,
56_08,
9_59,
11_19,
5_77_02,
1_36,
1_86,
47,
10_98,
2_93_67,
47,
# 4426, # What fairseq tokenizes from "<unk>": "_<"
# 3678, # What fairseq tokenizes from "<unk>": "unk"
# 2740, # What fairseq tokenizes from "<unk>": ">"
3, # What we tokenize from "<unk>": "<unk>"
6, # Residue from the tokenization: an extra sentencepiece underline
4,
60_44,
2_37,
62_84,
5_09_01,
5_28,
31,
90,
34,
9_27,
2,
]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
UpperCAmelCase = {"""input_ids""": [[0, 1_10_62, 8_27_72, 7, 15, 8_27_72, 5_38, 5_15_29, 2_37, 1_71_98, 12_90, 2_06, 9, 21_51_75, 13_14, 1_36, 1_71_98, 12_90, 2_06, 9, 5_63_59, 42, 12_20_09, 9, 1_64_66, 16, 8_73_44, 45_37, 9, 47_17, 7_83_81, 6, 15_99_58, 7, 15, 2_44_80, 6_18, 4, 5_27, 2_26_93, 54_28, 4, 27_77, 2_44_80, 98_74, 4, 4_35_23, 5_94, 4, 8_03, 1_83_92, 3_31_89, 18, 4, 4_35_23, 2_44_47, 1_23_99, 1_00, 2_49_55, 8_36_58, 96_26, 14_40_57, 15, 8_39, 2_23_35, 16, 1_36, 2_49_55, 8_36_58, 8_34_79, 15, 3_91_02, 7_24, 16, 6_78, 6_45, 27_89, 13_28, 45_89, 42, 12_20_09, 11_57_74, 23, 8_05, 13_28, 4_68_76, 7, 1_36, 5_38_94, 19_40, 4_22_27, 4_11_59, 1_77_21, 8_23, 4_25, 4, 2_75_12, 9_87_22, 2_06, 1_36, 55_31, 49_70, 9_19, 1_73_36, 5, 2], [0, 2_00_80, 6_18, 83, 8_27_75, 47, 4_79, 9, 15_17, 73, 5_38_94, 3_33, 8_05_81, 11_01_17, 1_88_11, 52_56, 12_95, 51, 15_25_26, 2_97, 79_86, 3_90, 12_44_16, 5_38, 3_54_31, 2_14, 98, 1_50_44, 2_57_37, 1_36, 71_08, 4_37_01, 23, 7_56, 13_53_55, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 5_81, 6_37_73, 11_94_55, 6, 14_77_97, 8_82_03, 7, 6_45, 70, 21, 32_85, 1_02_69, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on
        expected_encoding = UpperCAmelCase  # readable alias for the long literal kept above
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="xlm-roberta-base",
            revision="d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3",
        )
| 673
| 0
|
'''simple docstring'''
import random
import unittest
import torch
from diffusers import IFImg2ImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFImg2ImgSuperResolutionPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFImg2ImgSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"original_image"})
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_superresolution_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        original_image = floats_tensor((1, 3, 16, 16), rng=random.Random(seed)).to(device)

        inputs = {
            'prompt': 'A painting of a squirrel eating a burger',
            'image': image,
            'original_image': original_image,
            'generator': generator,
            'num_inference_steps': 2,
            'output_type': 'numpy',
        }

        return inputs

    @unittest.skipIf(
        torch_device != 'cuda' or not is_xformers_available(),
        reason='XFormers attention is only available with CUDA and `xformers` installed',
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != 'cuda', reason='float16 requires CUDA')
    def test_save_load_float16(self):
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)
| 432
|
'''simple docstring'''
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
flip_channel_order,
get_resize_output_image_size,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, is_vision_available, logging
if is_vision_available():
import PIL
if is_torch_available():
import torch
lowerCAmelCase : List[Any] = logging.get_logger(__name__)
class MobileViTImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_flip_channel_order: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {'shortest_edge': 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {'height': 256, 'width': 256}
        crop_size = get_size_dict(crop_size, param_name='crop_size')

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_flip_channel_order = do_flip_channel_order

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PIL.Image.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f'''The `size` dictionary must contain the key `shortest_edge`. Got {size.keys()}''')
        output_size = get_resize_output_image_size(image, size=size['shortest_edge'], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f'''The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}''')
        return center_crop(image, size=(size['height'], size['width']), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def flip_channel_order(
        self, image: np.ndarray, data_format: Optional[Union[str, ChannelDimension]] = None
    ) -> np.ndarray:
        return flip_channel_order(image, data_format=data_format)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_flip_channel_order: bool = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_flip_channel_order = (
            do_flip_channel_order if do_flip_channel_order is not None else self.do_flip_channel_order
        )

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name='crop_size')

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
                'torch.Tensor, tf.Tensor or jax.ndarray.'
            )

        if do_resize and size is None:
            raise ValueError('Size must be specified if do_resize is True.')

        if do_rescale and rescale_factor is None:
            raise ValueError('Rescale factor must be specified if do_rescale is True.')

        if do_center_crop and crop_size is None:
            raise ValueError('Crop size must be specified if do_center_crop is True.')

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        # the pretrained checkpoints assume images are BGR, not RGB
        if do_flip_channel_order:
            images = [self.flip_channel_order(image=image) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {'pixel_values': images}
        return BatchFeature(data=data, tensor_type=return_tensors)

    def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple] = None) -> Dict:
        logits = outputs.logits

        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    'Make sure that you pass in as many target sizes as the batch dimension of the logits'
                )

            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()

            semantic_segmentation = []

            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode='bilinear', align_corners=False
                )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]

        return semantic_segmentation
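# Minimal usage sketch (assumes Pillow and torch are installed and `pil_image` is a PIL image):
#   processor = MobileViTImageProcessor()
#   batch = processor(images=pil_image, return_tensors="pt")
#   batch["pixel_values"].shape  # (1, 3, 256, 256) with the default crop size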
| 432
| 1
|
from __future__ import annotations
def two_pointer(nums: list[int], target: int) -> list[int]:
    """Return indices i < j with nums[i] + nums[j] == target; assumes `nums` is sorted."""
    i = 0
    j = len(nums) - 1
    while i < j:
        if nums[i] + nums[j] == target:
            return [i, j]
        elif nums[i] + nums[j] < target:
            i = i + 1
        else:
            j = j - 1
    return []
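# Example: two_pointer([2, 7, 11, 15], 9) == [0, 1], since 2 + 7 == 9.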
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f'''{two_pointer([2, 7, 11, 15], 9) = }''')
| 197
|
import csv
from collections import defaultdict
from dataclasses import dataclass, field
from typing import List, Optional
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.ticker import ScalarFormatter
from transformers import HfArgumentParser
def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)
@dataclass
class PlotArguments:
    csv_file: str = field(
        metadata={"help": "The csv file to plot."},
    )
    plot_along_batch: bool = field(
        default=False,
        metadata={"help": "Whether to plot along batch size or sequence length. Defaults to sequence length."},
    )
    is_time: bool = field(
        default=False,
        metadata={"help": "Whether the csv file has time results or memory results. Defaults to memory results."},
    )
    no_log_scale: bool = field(
        default=False,
        metadata={"help": "Disable logarithmic scale when plotting"},
    )
    is_train: bool = field(
        default=False,
        metadata={
            "help": "Whether the csv file has training results or inference results. Defaults to inference results."
        },
    )
    figure_png_file: Optional[str] = field(
        default=None,
        metadata={"help": "Filename under which the plot will be saved. If unused no plot is saved."},
    )
    short_model_names: Optional[List[str]] = list_field(
        default=None, metadata={"help": "List of model names that are used instead of the ones in the csv file."}
    )
def can_convert_to_int(string):
    try:
        int(string)
        return True
    except ValueError:
        return False


def can_convert_to_float(string):
    try:
        float(string)
        return True
    except ValueError:
        return False
class Plot:
    def __init__(self, args):
        self.args = args
        self.result_dict = defaultdict(lambda: {"bsz": [], "seq_len": [], "result": {}})

        with open(self.args.csv_file, newline="") as csv_file:
            reader = csv.DictReader(csv_file)
            for row in reader:
                model_name = row["model"]
                self.result_dict[model_name]["bsz"].append(int(row["batch_size"]))
                self.result_dict[model_name]["seq_len"].append(int(row["sequence_length"]))
                if can_convert_to_int(row["result"]):
                    # value is not None
                    self.result_dict[model_name]["result"][
                        (int(row["batch_size"]), int(row["sequence_length"]))
                    ] = int(row["result"])
                elif can_convert_to_float(row["result"]):
                    # value is not None
                    self.result_dict[model_name]["result"][
                        (int(row["batch_size"]), int(row["sequence_length"]))
                    ] = float(row["result"])
    def plot(self):
        fig, ax = plt.subplots()
        title_str = "Time usage" if self.args.is_time else "Memory usage"
        title_str = title_str + " for training" if self.args.is_train else title_str + " for inference"

        if not self.args.no_log_scale:
            # set logarithm scales
            ax.set_xscale("log")
            ax.set_yscale("log")

        for axis in [ax.xaxis, ax.yaxis]:
            axis.set_major_formatter(ScalarFormatter())

        for model_name_idx, model_name in enumerate(self.result_dict.keys()):
            batch_sizes = sorted(set(self.result_dict[model_name]["bsz"]))
            sequence_lengths = sorted(set(self.result_dict[model_name]["seq_len"]))
            results = self.result_dict[model_name]["result"]

            (x_axis_array, inner_loop_array) = (
                (batch_sizes, sequence_lengths) if self.args.plot_along_batch else (sequence_lengths, batch_sizes)
            )

            label_model_name = (
                model_name if self.args.short_model_names is None else self.args.short_model_names[model_name_idx]
            )

            for inner_loop_value in inner_loop_array:
                if self.args.plot_along_batch:
                    y_axis_array = np.asarray(
                        [results[(x, inner_loop_value)] for x in x_axis_array if (x, inner_loop_value) in results],
                        dtype=int,
                    )
                else:
                    y_axis_array = np.asarray(
                        [results[(inner_loop_value, x)] for x in x_axis_array if (inner_loop_value, x) in results],
                        dtype=np.float32,
                    )

                (x_axis_label, inner_loop_label) = (
                    ("batch_size", "len") if self.args.plot_along_batch else ("in #tokens", "bsz")
                )

                x_axis_array = np.asarray(x_axis_array, int)[: len(y_axis_array)]
                plt.scatter(
                    x_axis_array, y_axis_array, label=f"{label_model_name} - {inner_loop_label}: {inner_loop_value}"
                )
                plt.plot(x_axis_array, y_axis_array, "--")

            title_str += f" {label_model_name} vs."

        title_str = title_str[:-4]
        y_axis_label = "Time in s" if self.args.is_time else "Memory in MB"

        # plot
        plt.title(title_str)
        plt.xlabel(x_axis_label)
        plt.ylabel(y_axis_label)
        plt.legend()

        if self.args.figure_png_file is not None:
            plt.savefig(self.args.figure_png_file)
        else:
            plt.show()
def main():
    parser = HfArgumentParser(PlotArguments)
    plot_args = parser.parse_args_into_dataclasses()[0]
    plot = Plot(args=plot_args)
    plot.plot()
if __name__ == "__main__":
main()
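
# Sketch of a typical invocation (the CSV path and the script name are
# illustrative; any CSV with `model`, `batch_size`, `sequence_length` and
# `result` columns, as read above, will do):
#
#   python plot_csv_file.py --csv_file required_memory.csv --figure_png_file memory_plot.png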
| 454
| 0
|
import logging
import math
import os
from dataclasses import dataclass, field
from glob import glob
from typing import Optional
from torch.utils.data import ConcatDataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_WITH_LM_HEAD_MAPPING,
AutoConfig,
AutoModelWithLMHead,
AutoTokenizer,
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForWholeWordMask,
HfArgumentParser,
LineByLineTextDataset,
LineByLineWithRefDataset,
PreTrainedTokenizer,
TextDataset,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)

MODEL_CONFIG_CLASSES = list(MODEL_WITH_LM_HEAD_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch.
    """

    model_name_or_path: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization. Leave None if you want to train a model from"
                " scratch."
            )
        },
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    train_data_file: Optional[str] = field(
        default=None, metadata={"help": "The input training data file (a text file)."}
    )
    train_data_files: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The input training data files (multiple files in glob format). "
                "Very often splitting large files to smaller files can prevent tokenizer going out of memory"
            )
        },
    )
    eval_data_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
    )
    train_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input train ref data file for whole word mask in Chinese."},
    )
    eval_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input eval ref data file for whole word mask in Chinese."},
    )
    line_by_line: bool = field(
        default=False,
        metadata={"help": "Whether distinct lines of text in the dataset are to be handled as distinct sequences."},
    )
    mlm: bool = field(
        default=False, metadata={"help": "Train with masked-language modeling loss instead of language modeling."}
    )
    whole_word_mask: bool = field(default=False, metadata={"help": "Whether or not to use whole word mask."})
    mlm_probability: float = field(
        default=0.15, metadata={"help": "Ratio of tokens to mask for masked language modeling loss"}
    )
    plm_probability: float = field(
        default=1 / 6,
        metadata={
            "help": (
                "Ratio of length of a span of masked tokens to surrounding context length for permutation language"
                " modeling."
            )
        },
    )
    max_span_length: int = field(
        default=5, metadata={"help": "Maximum length of a span of masked tokens for permutation language modeling."}
    )
    block_size: int = field(
        default=-1,
        metadata={
            "help": (
                "Optional input sequence length after tokenization. "
                "The training dataset will be truncated in block of this size for training. "
                "Default to the model max input length for single sentence inputs (take into account special tokens)."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
def get_dataset(
    args: DataTrainingArguments,
    tokenizer: PreTrainedTokenizer,
    evaluate: bool = False,
    cache_dir: Optional[str] = None,
):
    def _dataset(file_path, ref_path=None):
        if args.line_by_line:
            if ref_path is not None:
                if not args.whole_word_mask or not args.mlm:
                    raise ValueError("You need to set whole word masking and mlm to True for Chinese Whole Word Mask")
                return LineByLineWithRefDataset(
                    tokenizer=tokenizer,
                    file_path=file_path,
                    block_size=args.block_size,
                    ref_path=ref_path,
                )
            return LineByLineTextDataset(tokenizer=tokenizer, file_path=file_path, block_size=args.block_size)
        else:
            return TextDataset(
                tokenizer=tokenizer,
                file_path=file_path,
                block_size=args.block_size,
                overwrite_cache=args.overwrite_cache,
                cache_dir=cache_dir,
            )

    if evaluate:
        return _dataset(args.eval_data_file, args.eval_ref_file)
    elif args.train_data_files:
        return ConcatDataset([_dataset(f) for f in glob(args.train_data_files)])
    else:
        return _dataset(args.train_data_file, args.train_ref_file)
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.

    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if data_args.eval_data_file is None and training_args.do_eval:
        raise ValueError(
            "Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file "
            "or remove the --do_eval argument.")

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome.")

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.local_rank != -1),
        training_args.fp16,
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed
    set_seed(training_args.seed)

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.

    if model_args.config_name:
        config = AutoConfig.from_pretrained(model_args.config_name, cache_dir=model_args.cache_dir)
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir)
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning("You are instantiating a new config instance from scratch.")

    if model_args.tokenizer_name:
        tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, cache_dir=model_args.cache_dir)
    elif model_args.model_name_or_path:
        tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir)
    else:
        raise ValueError(
            "You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another"
            " script, save it, and load it from here, using --tokenizer_name")

    if model_args.model_name_or_path:
        model = AutoModelWithLMHead.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool(".ckpt" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
        )
    else:
        logger.info("Training new model from scratch")
        model = AutoModelWithLMHead.from_config(config)

    model.resize_token_embeddings(len(tokenizer))

    if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm:
        raise ValueError(
            "BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the"
            " --mlm flag (masked language modeling).")

    if data_args.block_size <= 0:
        data_args.block_size = tokenizer.max_len
        # Our input block size will be the max possible for the model
    else:
        data_args.block_size = min(data_args.block_size, tokenizer.max_len)

    # Get datasets

    train_dataset = (
        get_dataset(data_args, tokenizer=tokenizer, cache_dir=model_args.cache_dir) if training_args.do_train else None
    )
    eval_dataset = (
        get_dataset(data_args, tokenizer=tokenizer, evaluate=True, cache_dir=model_args.cache_dir)
        if training_args.do_eval
        else None
    )
    if config.model_type == "xlnet":
        data_collator = DataCollatorForPermutationLanguageModeling(
            tokenizer=tokenizer,
            plm_probability=data_args.plm_probability,
            max_span_length=data_args.max_span_length,
        )
    else:
        if data_args.mlm and data_args.whole_word_mask:
            data_collator = DataCollatorForWholeWordMask(
                tokenizer=tokenizer, mlm_probability=data_args.mlm_probability
            )
        else:
            data_collator = DataCollatorForLanguageModeling(
                tokenizer=tokenizer, mlm=data_args.mlm, mlm_probability=data_args.mlm_probability
            )

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        data_collator=data_collator,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        prediction_loss_only=True,
    )

    # Training
    if training_args.do_train:
        model_path = (
            model_args.model_name_or_path
            if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path)
            else None
        )
        trainer.train(model_path=model_path)
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_master():
            tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        eval_output = trainer.evaluate()

        perplexity = math.exp(eval_output["eval_loss"])
        result = {"perplexity": perplexity}

        output_eval_file = os.path.join(training_args.output_dir, "eval_results_lm.txt")
        if trainer.is_world_master():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key in sorted(result.keys()):
                    logger.info("  %s = %s", key, str(result[key]))
                    writer.write("%s = %s\n" % (key, str(result[key])))

        results.update(result)

    return results
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
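
# Sketch of a typical launch (all paths are placeholders; `--do_train` and
# `--output_dir` come from `TrainingArguments`):
#
#   python run_language_modeling.py \
#       --model_name_or_path gpt2 \
#       --train_data_file /path/to/train.txt \
#       --do_train \
#       --output_dir /tmp/lm-output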
| 25
|
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
__lowerCamelCase : Union[str, Any] = """2.13.1"""
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse("""3.7"""):
raise ImportWarning(
"""To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition."""
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
"""To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n"""
"""If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`."""
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager

del _arrow_dataset, _utils, _deprecated_download_manager
| 25
| 1
|
'''simple docstring'''
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated.
REFERENCE_CODE = ' def __init__(self, config):\n super().__init__()\n self.transform = BertPredictionHeadTransform(config)\n\n # The output weights are the same as the input embeddings, but there is\n # an output-only bias for each token.\n self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)\n\n self.bias = nn.Parameter(torch.zeros(config.vocab_size))\n\n # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`\n self.decoder.bias = self.bias\n\n def forward(self, hidden_states):\n hidden_states = self.transform(hidden_states)\n hidden_states = self.decoder(hidden_states)\n return hidden_states\n'
class CopyCheckTester(unittest.TestCase):
    def setUp(self):
        self.transformer_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.transformer_dir, "models/bert/"))
        check_copies.TRANSFORMER_PATH = self.transformer_dir
        shutil.copy(
            os.path.join(git_repo_path, "src/transformers/models/bert/modeling_bert.py"),
            os.path.join(self.transformer_dir, "models/bert/modeling_bert.py"),
        )
    def tearDown(self):
        check_copies.TRANSFORMER_PATH = "src/transformers"
        shutil.rmtree(self.transformer_dir)
    def check_copy_consistency(self, comment, class_name, class_code, overwrite_result=None):
        code = comment + f"\nclass {class_name}(nn.Module):\n" + class_code
        if overwrite_result is not None:
            expected = comment + f"\nclass {class_name}(nn.Module):\n" + overwrite_result
        mode = black.Mode(target_versions={black.TargetVersion.PY35}, line_length=119)
        code = black.format_str(code, mode=mode)
        fname = os.path.join(self.transformer_dir, "new_code.py")
        with open(fname, "w", newline="\n") as f:
            f.write(code)
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname)) == 0)
        else:
            check_copies.is_copy_consistent(f.name, overwrite=True)
            with open(fname, "r") as f:
                self.assertTrue(f.read(), expected)
    def test_find_code_in_transformers(self):
        code = check_copies.find_code_in_transformers("models.bert.modeling_bert.BertLMPredictionHead")
        self.assertEqual(code, REFERENCE_CODE)
    def test_is_copy_consistent(self):
        # Base copy consistency
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead",
            "BertLMPredictionHead",
            REFERENCE_CODE + "\n",
        )
        # With no empty line at the end
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead",
            "BertLMPredictionHead",
            REFERENCE_CODE,
        )
        # Copy consistency with rename
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel",
            "TestModelLMPredictionHead",
            re.sub("Bert", "TestModel", REFERENCE_CODE),
        )
        # Copy consistency with a really long name
        long_class_name = "TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
        self.check_copy_consistency(
            f"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}",
            f"{long_class_name}LMPredictionHead",
            re.sub("Bert", long_class_name, REFERENCE_CODE),
        )
        # Copy consistency with overwrite
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel",
            "TestModelLMPredictionHead",
            REFERENCE_CODE,
            overwrite_result=re.sub("Bert", "TestModel", REFERENCE_CODE),
        )
    def test_convert_to_localized_md(self):
        localized_readme = check_copies.LOCALIZED_READMES["README_zh-hans.md"]

        md_list = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the"""
""" Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for"""
""" Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong"""
""" Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1."""
""" **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),"""
""" released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and"""
""" lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same"""
""" method has been applied to compress GPT2 into"""
""" [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into"""
""" [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),"""
""" Multilingual BERT into"""
""" [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German"""
""" version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**"""
""" (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders"""
""" as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang"""
""" Luong, Quoc V. Le, Christopher D. Manning."""
)
        localized_md_list = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"""
""" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"""
)
        converted_md_list_sample = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"""
""" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1."""
""" **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文"""
""" [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and"""
""" lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same"""
""" method has been applied to compress GPT2 into"""
""" [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into"""
""" [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),"""
""" Multilingual BERT into"""
""" [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German"""
""" version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自"""
""" Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather"""
""" than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le,"""
""" Christopher D. Manning 发布。\n"""
)
        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            md_list, localized_md_list, localized_readme["format_model_list"]
        )

        self.assertFalse(num_models_equal)
        self.assertEqual(converted_md_list, converted_md_list_sample)
        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            md_list, converted_md_list, localized_readme["format_model_list"]
        )

        # Check whether the number of models is equal to README.md after conversion.
        self.assertTrue(num_models_equal)
        link_changed_md_list = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the"""
""" Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for"""
""" Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong"""
""" Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut."""
)
        link_unchanged_md_list = (
"""1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and"""
""" the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"""
)
        converted_md_list_sample = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"""
""" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"""
)
        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            link_changed_md_list, link_unchanged_md_list, localized_readme["format_model_list"]
        )

        # Check if the model link is synchronized.
        self.assertEqual(converted_md_list, converted_md_list_sample)
| 215
|
'''simple docstring'''
def combination_util(arr, n, r, index, data, i):
    """
    Recursively fill data[] with all combinations of size r from arr[] and print them.
    """
    if index == r:
        # Current combination is ready to be printed
        for j in range(r):
            print(data[j], end=" ")
        print(" ")
        return
    # When no more elements are there to put in data[]
    if i >= n:
        return
    # current is included, put next at next location
    data[index] = arr[i]
    combination_util(arr, n, r, index + 1, data, i + 1)
    # current is excluded, replace it with
    # next (Note that i+1 is passed, but
    # index is not changed)
    combination_util(arr, n, r, index, data, i + 1)
# The main function that prints all combinations
# of size r in arr[] of size n. This function
# mainly uses combinationUtil()
def print_combination(arr, n, r):
    """
    Print all combinations of size r in arr[] of size n, via combination_util().
    """
    # A temporary array to store one combination at a time
    data = [0] * r
    # Print all combination using temporary array 'data[]'
    combination_util(arr, n, r, 0, data, 0)
if __name__ == "__main__":
# Driver code to check the function above
__snake_case : List[Any] = [10, 20, 30, 40, 50]
print_combination(arr, len(arr), 3)
# This code is contributed by Ambuj sahu
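
# For reference, the standard library produces the same r-sized subsets; a
# minimal cross-check (kept commented out so the script's output is unchanged):
#
#   from itertools import combinations
#   for combo in combinations([10, 20, 30, 40, 50], 3):
#       print(*combo)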
| 215
| 1
|
import unittest
from transformers import AutoConfig, AutoTokenizer, BertConfig, RobertaConfig, TensorType, is_flax_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, slow
if is_flax_available():
import jax
from transformers.models.auto.modeling_flax_auto import FlaxAutoModel
from transformers.models.bert.modeling_flax_bert import FlaxBertModel
from transformers.models.roberta.modeling_flax_roberta import FlaxRobertaModel
@require_flax
class FlaxAutoModelTest(unittest.TestCase):
    @slow
    def test_bert_from_pretrained(self):
        for model_name in ["bert-base-cased", "bert-large-uncased"]:
            with self.subTest(model_name):
                config = AutoConfig.from_pretrained(model_name)
                self.assertIsNotNone(config)
                self.assertIsInstance(config, BertConfig)

                model = FlaxAutoModel.from_pretrained(model_name)
                self.assertIsNotNone(model)
                self.assertIsInstance(model, FlaxBertModel)
    @slow
    def test_roberta_from_pretrained(self):
        for model_name in ["roberta-base", "roberta-large"]:
            with self.subTest(model_name):
                config = AutoConfig.from_pretrained(model_name)
                self.assertIsNotNone(config)
                self.assertIsInstance(config, RobertaConfig)

                model = FlaxAutoModel.from_pretrained(model_name)
                self.assertIsNotNone(model)
                self.assertIsInstance(model, FlaxRobertaModel)
    @slow
    def test_bert_jax_jit(self):
        for model_name in ["bert-base-cased", "bert-large-uncased"]:
            tokenizer = AutoTokenizer.from_pretrained(model_name)
            model = FlaxBertModel.from_pretrained(model_name)
            tokens = tokenizer("Do you support jax jitted function?", return_tensors=TensorType.JAX)

            @jax.jit
            def eval(**kwargs):
                return model(**kwargs)

            eval(**tokens).block_until_ready()
    @slow
    def test_roberta_jax_jit(self):
        for model_name in ["roberta-base", "roberta-large"]:
            tokenizer = AutoTokenizer.from_pretrained(model_name)
            model = FlaxRobertaModel.from_pretrained(model_name)
            tokens = tokenizer("Do you support jax jitted function?", return_tensors=TensorType.JAX)

            @jax.jit
            def eval(**kwargs):
                return model(**kwargs)

            eval(**tokens).block_until_ready()
    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"
        ):
            _ = FlaxAutoModel.from_pretrained("bert-base")
    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = FlaxAutoModel.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")
    def test_model_file_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/config-no-model does not appear to have a file named flax_model.msgpack",
        ):
            _ = FlaxAutoModel.from_pretrained("hf-internal-testing/config-no-model")
    def test_model_from_pt_suggestion(self):
        with self.assertRaisesRegex(EnvironmentError, "Use `from_pt=True` to load this model"):
            _ = FlaxAutoModel.from_pretrained("hf-internal-testing/tiny-bert-pt-only")
| 452
|
import requests
from bs4 import BeautifulSoup
def get_citation(base_url: str, params: dict) -> str:
    """
    Return the citation count text for a Google Scholar lookup.
    """
    soup = BeautifulSoup(requests.get(base_url, params=params).content, "html.parser")
    div = soup.find("div", attrs={"class": "gs_ri"})
    anchors = div.find("div", attrs={"class": "gs_fl"}).find_all("a")
    return anchors[2].get_text()
if __name__ == "__main__":
    params = {
'''title''': (
'''Precisely geometry controlled microsupercapacitors for ultrahigh areal '''
'''capacitance, volumetric capacitance, and energy density'''
),
'''journal''': '''Chem. Mater.''',
'''volume''': 30,
'''pages''': '''3979-3990''',
'''year''': 2018,
'''hl''': '''en''',
}
print(get_citation('''https://scholar.google.com/scholar_lookup''', params=params))
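
# Note: this scrapes the rendered Google Scholar result page, so it is brittle
# by nature; "gs_ri" is the result body and "gs_fl" the row of result links,
# whose third anchor is (at the time of writing) the "Cited by N" link whose
# text get_citation() returns.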
| 452
| 1
|
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def check_model_parameters(model_a, model_b, did_step, iteration):
    for param, grad_param in zip(model_a.parameters(), model_b.parameters()):
        if not param.requires_grad:
            continue
        if not did_step:
            # Grads should not be in sync
            assert (
                torch.allclose(param.grad, grad_param.grad) is False
            ), f"Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})"
        else:
            # Grads should be in sync
            assert (
                torch.allclose(param.grad, grad_param.grad) is True
            ), f"Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})"
def step_model(model, input, target, accelerator, do_backward=True):
    model.train()
    output = model(input)
    loss = F.mse_loss(output, target.to(output.device))
    if not do_backward:
        loss /= accelerator.gradient_accumulation_steps
        loss.backward()
    else:
        accelerator.backward(loss)
def get_training_setup(accelerator, sched=False):
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=80)
    dataloader = DataLoader(dset, batch_size=16)
    model.to(accelerator.device)
    if sched:
        opt = AdamW(params=model.parameters(), lr=1e-3)
        ddp_opt = AdamW(params=ddp_model.parameters(), lr=1e-3)
        sched = LambdaLR(opt, lr_lambda=lambda epoch: epoch**0.65)
        ddp_sched = LambdaLR(ddp_opt, lr_lambda=lambda epoch: epoch**0.65)
    # Make a copy of `model`
    if sched:
        ddp_model, ddp_opt, ddp_sched, dataloader = accelerator.prepare(ddp_model, ddp_opt, ddp_sched, dataloader)
    else:
        ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    if sched:
        return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
    return model, ddp_model, dataloader
def test_noop_sync(accelerator):
    # Test when on a single CPU or GPU that the context manager does nothing
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)

        # Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
        check_model_parameters(model, ddp_model, True, iteration)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            assert torch.allclose(
                param.grad, ddp_param.grad
            ), f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
def test_distributed_sync(accelerator):
    # Test on distributed setup that context manager behaves properly
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)

        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if iteration % 2 == 0:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), f"Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"
            else:
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
def test_gradient_accumulation(split_batches=False, dispatch_batches=False):
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2
    )
    # Test that context manager behaves properly
    model, ddp_model, dataloader = get_training_setup(accelerator)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator, False)
        # Do "gradient accumulation" (noop)
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)

        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if ((iteration + 1) % 2 == 0) or (iteration == len(dataloader) - 1):
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), f"Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"
            else:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), f"Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
    GradientState._reset_state()
def test_gradient_accumulation_with_opt_and_scheduler(split_batches=False, dispatch_batches=False):
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2
    )
    # Test that context manager behaves properly
    model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched = get_training_setup(accelerator, True)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        model.train()
        ddp_model.train()
        step_model(model, input, target, accelerator, False)
        opt.step()

        if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(dataloader)):
            if split_batches:
                sched.step()
            else:
                for _ in range(accelerator.num_processes):
                    sched.step()
        opt.zero_grad()
        # Perform gradient accumulation under wrapper
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
            ddp_opt.step()
            ddp_sched.step()
            ddp_opt.zero_grad()

        # Learning rates should be the same
        assert (
            opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
        ), f'Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]["lr"]}\nDDP opt: {ddp_opt.param_groups[0]["lr"]}\n'
        did_step = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(dataloader))
        if accelerator.num_processes > 1:
            check_model_parameters(model, ddp_model, did_step, iteration)
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
    GradientState._reset_state()
def test_dataloader_break():
    accelerator = Accelerator()

    first_dset = RegressionDataset(length=80)
    first_dataloader = DataLoader(first_dset, batch_size=16)
    second_dset = RegressionDataset(length=96)
    second_dataloader = DataLoader(second_dset, batch_size=16)
    first_dataloader, second_dataloader = accelerator.prepare(first_dataloader, second_dataloader)
    assert accelerator.gradient_state.active_dataloader is None
    for iteration, _ in enumerate(first_dataloader):
        assert id(accelerator.gradient_state.active_dataloader) == id(first_dataloader)
        if iteration < len(first_dataloader) - 1:
            assert not accelerator.gradient_state.end_of_dataloader
            if iteration == 1:
                for batch_num, _ in enumerate(second_dataloader):
                    assert id(accelerator.gradient_state.active_dataloader) == id(second_dataloader)
                    if batch_num < len(second_dataloader) - 1:
                        assert not accelerator.gradient_state.end_of_dataloader
                    else:
                        assert accelerator.gradient_state.end_of_dataloader
        else:
            assert accelerator.gradient_state.end_of_dataloader
    assert accelerator.gradient_state.active_dataloader is None
def main():
    accelerator = Accelerator()
    state = accelerator.state
    if state.local_process_index == 0:
        print("**Test `accumulate` gradient accumulation with dataloader break**")
    test_dataloader_break()
    if state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print("**Test NOOP `no_sync` context manager**")
        test_noop_sync(accelerator)
    if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
        if state.local_process_index == 0:
            print("**Test Distributed `no_sync` context manager**")
        test_distributed_sync(accelerator)
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if state.local_process_index == 0:
                    print(
                        "**Test `accumulate` gradient accumulation, ",
                        f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**",
                    )
                test_gradient_accumulation(split_batch, dispatch_batches)

    # Currently will break on torch 2.0 +, need to investigate why
    if is_torch_version("<", "2.0") or state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print(
                "**Test `accumulate` gradient accumulation with optimizer and scheduler, ",
                "`split_batches=False`, `dispatch_batches=False`**",
            )
        test_gradient_accumulation_with_opt_and_scheduler()
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if not split_batch and not dispatch_batches:
                    continue
                if state.local_process_index == 0:
                    print(
                        "**Test `accumulate` gradient accumulation with optimizer and scheduler, ",
                        f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**",
                    )
                test_gradient_accumulation_with_opt_and_scheduler(split_batch, dispatch_batches)
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
| 367
|
from __future__ import annotations
import csv
import requests
from bs4 import BeautifulSoup
def __lowerCAmelCase ( __snake_case = "" ):
__lowerCAmelCase = url or "https://www.imdb.com/chart/top/?ref_=nv_mv_250"
__lowerCAmelCase = BeautifulSoup(requests.get(__snake_case ).text , "html.parser" )
__lowerCAmelCase = soup.find_all("td" , attrs="titleColumn" )
__lowerCAmelCase = soup.find_all("td" , class_="ratingColumn imdbRating" )
return {
title.a.text: float(rating.strong.text )
for title, rating in zip(__snake_case , __snake_case )
}
def __lowerCAmelCase ( __snake_case = "IMDb_Top_250_Movies.csv" ):
__lowerCAmelCase = get_imdb_top_aaa_movies()
with open(__snake_case , "w" , newline="" ) as out_file:
__lowerCAmelCase = csv.writer(__snake_case )
writer.writerow(["Movie title", "IMDb rating"] )
for title, rating in movies.items():
writer.writerow([title, rating] )
if __name__ == "__main__":
write_movies()
| 367
| 1
|
"""simple docstring"""
import argparse
from copy import deepcopy
import numpy as np
from datasets import ClassLabel, DatasetDict, load_dataset
from evaluate import load
from transformers import (
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
Trainer,
TrainerCallback,
TrainingArguments,
set_seed,
)
def get_args():
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_ckpt", type=str, default="microsoft/unixcoder-base-nine")
    parser.add_argument("--num_epochs", type=int, default=5)
    parser.add_argument("--batch_size", type=int, default=6)
    parser.add_argument("--gradient_accumulation_steps", type=int, default=1)
    parser.add_argument("--freeze", type=bool, default=True)
    parser.add_argument("--learning_rate", type=float, default=5e-4)
    parser.add_argument("--seed", type=int, default=0)
    parser.add_argument("--lr_scheduler_type", type=str, default="cosine")
    parser.add_argument("--num_warmup_steps", type=int, default=10)
    parser.add_argument("--weight_decay", type=float, default=0.01)
    parser.add_argument("--output_dir", type=str, default="./results")
    return parser.parse_args()
metric = load("accuracy")


def compute_metrics(eval_pred):
    predictions, labels = eval_pred
    predictions = np.argmax(predictions, axis=1)
    return metric.compute(predictions=predictions, references=labels)
class CustomCallback(TrainerCallback):
    def __init__(self, trainer) -> None:
        super().__init__()
        self._trainer = trainer

    def on_epoch_end(self, args, state, control, **kwargs):
        if control.should_evaluate:
            control_copy = deepcopy(control)
            self._trainer.evaluate(eval_dataset=self._trainer.train_dataset, metric_key_prefix="train")
            return control_copy
def main():
    args = get_args()
    set_seed(args.seed)

    dataset = load_dataset("codeparrot/codecomplex", split="train")
    train_test = dataset.train_test_split(test_size=0.2)
    test_validation = train_test["test"].train_test_split(test_size=0.5)
    train_test_validation = DatasetDict(
        {
            "train": train_test["train"],
            "test": test_validation["train"],
            "valid": test_validation["test"],
        }
    )

    print("Loading tokenizer and model")
    tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForSequenceClassification.from_pretrained(args.model_ckpt, num_labels=7)
    model.config.pad_token_id = model.config.eos_token_id

    if args.freeze:
        for param in model.roberta.parameters():
            param.requires_grad = False

    labels = ClassLabel(num_classes=7, names=list(set(train_test_validation["train"]["complexity"])))

    def tokenize(example):
        inputs = tokenizer(example["src"], truncation=True, max_length=1024)
        label = labels.str2int(example["complexity"])
        return {
            "input_ids": inputs["input_ids"],
            "attention_mask": inputs["attention_mask"],
            "label": label,
        }

    tokenized_datasets = train_test_validation.map(
        tokenize,
        batched=True,
        remove_columns=train_test_validation["train"].column_names,
    )
    data_collator = DataCollatorWithPadding(tokenizer=tokenizer)

    training_args = TrainingArguments(
        output_dir=args.output_dir,
        learning_rate=args.learning_rate,
        lr_scheduler_type=args.lr_scheduler_type,
        evaluation_strategy="epoch",
        save_strategy="epoch",
        logging_strategy="epoch",
        per_device_train_batch_size=args.batch_size,
        per_device_eval_batch_size=args.batch_size,
        num_train_epochs=args.num_epochs,
        gradient_accumulation_steps=args.gradient_accumulation_steps,
        weight_decay=0.01,
        metric_for_best_model="accuracy",
        run_name="complexity-java",
        report_to="wandb",
    )

    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=tokenized_datasets["train"],
        eval_dataset=tokenized_datasets["valid"],
        tokenizer=tokenizer,
        data_collator=data_collator,
        compute_metrics=compute_metrics,
    )

    print("Training...")
    trainer.add_callback(CustomCallback(trainer))
    trainer.train()
if __name__ == "__main__":
main()
| 91
|
"""simple docstring"""
from math import factorial
def binomial_distribution(successes: int, trials: int, prob: float) -> float:
    """
    Return the probability of `successes` successes in `trials` independent
    Bernoulli trials with success probability `prob`.
    """
    if successes > trials:
        raise ValueError("successes must be lower or equal to trials")
    if trials < 0 or successes < 0:
        raise ValueError("the function is defined for non-negative integers")
    if not isinstance(successes, int) or not isinstance(trials, int):
        raise ValueError("the function is defined for non-negative integers")
    if not 0 < prob < 1:
        raise ValueError("prob has to be in range of 1 - 0")
    probability = (prob**successes) * ((1 - prob) ** (trials - successes))
    # Calculate the binomial coefficient: n! / k!(n-k)!
    coefficient = float(factorial(trials))
    coefficient /= factorial(successes) * factorial(trials - successes)
    return probability * coefficient
if __name__ == "__main__":
from doctest import testmod
testmod()
print("Probability of 2 successes out of 4 trails")
print("with probability of 0.75 is:", end=" ")
print(binomial_distribution(2, 4, 0.75))
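
# Equivalent closed form via the standard library (math.comb, Python >= 3.8),
# shown as a sanity check:
#
#   from math import comb
#   comb(4, 2) * 0.75**2 * 0.25**2  # == binomial_distribution(2, 4, 0.75) == 0.2109375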
| 91
| 1
|
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
def get_resize_output_image_size(input_image, output_size, keep_aspect_ratio, multiple):
    def constraint_to_multiple_of(val, multiple, min_val=0, max_val=None):
        x = round(val / multiple) * multiple
        if max_val is not None and x > max_val:
            x = math.floor(val / multiple) * multiple
        if x < min_val:
            x = math.ceil(val / multiple) * multiple
        return x

    output_size = (output_size, output_size) if isinstance(output_size, int) else output_size
    input_height, input_width = get_image_size(input_image)
    output_height, output_width = output_size

    # determine new height and width
    scale_height = output_height / input_height
    scale_width = output_width / input_width

    if keep_aspect_ratio:
        # scale as little as possible
        if abs(1 - scale_width) < abs(1 - scale_height):
            # fit width
            scale_height = scale_width
        else:
            # fit height
            scale_width = scale_height

    new_height = constraint_to_multiple_of(scale_height * input_height, multiple=multiple)
    new_width = constraint_to_multiple_of(scale_width * input_width, multiple=multiple)

    return (new_height, new_width)
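
# Worked example (illustrative numbers): for a 480x640 input resized towards
# 384x384 with keep_aspect_ratio=True and multiple=32, the height scale 0.8 is
# closer to 1 than the width scale 0.6, so it is applied to both axes and the
# function returns (384, 512), both multiples of 32.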
class DPTImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(self, do_resize=True, size=None, resample=PILImageResampling.BILINEAR, keep_aspect_ratio=False, ensure_multiple_of=1, do_rescale=True, rescale_factor=1 / 255, do_normalize=True, image_mean=None, image_std=None, **kwargs) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 384, "width": 384}
        size = get_size_dict(size)
        self.do_resize = do_resize
        self.size = size
        self.keep_aspect_ratio = keep_aspect_ratio
        self.ensure_multiple_of = ensure_multiple_of
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize(self, image, size, keep_aspect_ratio=False, ensure_multiple_of=1, resample=PILImageResampling.BICUBIC, data_format=None, **kwargs):
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
        output_size = get_resize_output_image_size(
            image, output_size=(size["height"], size["width"]), keep_aspect_ratio=keep_aspect_ratio, multiple=ensure_multiple_of
        )
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)
    def rescale(self, image, scale, data_format=None, **kwargs):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)
    def normalize(self, image, mean, std, data_format=None, **kwargs):
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(self, images, do_resize=None, size=None, keep_aspect_ratio=None, ensure_multiple_of=None, resample=None, do_rescale=None, rescale_factor=None, do_normalize=None, image_mean=None, image_std=None, return_tensors=None, data_format=ChannelDimension.FIRST, **kwargs):
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        keep_aspect_ratio = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
        ensure_multiple_of = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray.")

        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
    def post_process_semantic_segmentation(self, outputs, target_sizes=None):
        logits = outputs.logits

        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits")

            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()

            semantic_segmentation = []

            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
                )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]

        return semantic_segmentation
| 141
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import _LazyModule
lowerCAmelCase = {"""tokenization_wav2vec2_phoneme""": ["""Wav2Vec2PhonemeCTCTokenizer"""]}
if TYPE_CHECKING:
    from .tokenization_wav2vec2_phoneme import Wav2Vec2PhonemeCTCTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 525
| 0
|
import argparse
import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, bert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = BertConfig.from_json_file(bert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = BertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_bert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--bert_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained BERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
'''simple docstring'''
import inspect
import tempfile
from collections import OrderedDict, UserDict
from collections.abc import MutableMapping
from contextlib import ExitStack, contextmanager
from dataclasses import fields
from enum import Enum
from typing import Any, ContextManager, List, Tuple
import numpy as np
from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy
if is_flax_available():
import jax.numpy as jnp
class cached_property(property):
    """
    Descriptor that mimics @property but caches the output in a member variable.
    """
    def __get__(self, obj, objtype=None):
        if obj is None:
            return self
        if self.fget is None:
            raise AttributeError("""unreadable attribute""")
        attr = """__cached_""" + self.fget.__name__
        cached = getattr(obj, attr, None)
        if cached is None:
            cached = self.fget(obj)
            setattr(obj, attr, cached)
        return cached
def strtobool(val):
    """Convert a string representation of truth to 1 (true) or 0 (false)."""
    val = val.lower()
    if val in {"y", "yes", "t", "true", "on", "1"}:
        return 1
    if val in {"n", "no", "f", "false", "off", "0"}:
        return 0
    raise ValueError(F"invalid truth value {val!r}")
def is_tensor(x):
    """Tests if `x` is a torch, tensorflow or jax tensor, or a numpy array."""
    if is_torch_fx_proxy(x):
        return True
    if is_torch_available():
        import torch
        if isinstance(x, torch.Tensor):
            return True
    if is_tf_available():
        import tensorflow as tf
        if isinstance(x, tf.Tensor):
            return True
    if is_flax_available():
        import jax.numpy as jnp
        from jax.core import Tracer
        if isinstance(x, (jnp.ndarray, Tracer)):
            return True
    return isinstance(x, np.ndarray)
def _is_numpy(x):
    return isinstance(x, np.ndarray)
def is_numpy_array(x):
    """Tests if `x` is a numpy array."""
    return _is_numpy(x)
def _is_torch(x):
    import torch
    return isinstance(x, torch.Tensor)
def is_torch_tensor(x):
    """Tests if `x` is a torch tensor. Safe to call even when torch is not installed."""
    return False if not is_torch_available() else _is_torch(x)
def _is_torch_device(x):
    import torch
    return isinstance(x, torch.device)
def is_torch_device(x):
    """Tests if `x` is a torch device. Safe to call even when torch is not installed."""
    return False if not is_torch_available() else _is_torch_device(x)
def _is_torch_dtype(x):
    import torch
    if isinstance(x, str):
        if hasattr(torch, x):
            x = getattr(torch, x)
        else:
            return False
    return isinstance(x, torch.dtype)
def is_torch_dtype(x):
    """Tests if `x` is a torch dtype. Safe to call even when torch is not installed."""
    return False if not is_torch_available() else _is_torch_dtype(x)
def _is_tensorflow(x):
    import tensorflow as tf
    return isinstance(x, tf.Tensor)
def is_tf_tensor(x):
    """Tests if `x` is a tensorflow tensor. Safe to call even when tensorflow is not installed."""
    return False if not is_tf_available() else _is_tensorflow(x)
def _is_tf_symbolic_tensor(x):
    import tensorflow as tf
    # the `is_symbolic_tensor` predicate is only available starting with TF 2.14
    if hasattr(tf, """is_symbolic_tensor"""):
        return tf.is_symbolic_tensor(x)
    return type(x) == tf.Tensor
def is_tf_symbolic_tensor(x):
    """Tests if `x` is a tensorflow symbolic tensor (i.e. not eager). Safe without tensorflow installed."""
    return False if not is_tf_available() else _is_tf_symbolic_tensor(x)
def _is_jax(x):
    import jax.numpy as jnp  # noqa: F811
    return isinstance(x, jnp.ndarray)
def is_jax_tensor(x):
    """Tests if `x` is a jax array. Safe to call even when jax is not installed."""
    return False if not is_flax_available() else _is_jax(x)
def to_py_obj(obj):
    """Convert a TensorFlow tensor, PyTorch tensor, Numpy array or python list to a python list."""
    if isinstance(obj, (dict, UserDict)):
        return {k: to_py_obj(v) for k, v in obj.items()}
    elif isinstance(obj, (list, tuple)):
        return [to_py_obj(o) for o in obj]
    elif is_tf_tensor(obj):
        return obj.numpy().tolist()
    elif is_torch_tensor(obj):
        return obj.detach().cpu().tolist()
    elif is_jax_tensor(obj):
        return np.asarray(obj).tolist()
    elif isinstance(obj, (np.ndarray, np.number)):  # tolist also works on 0d np arrays
        return obj.tolist()
    else:
        return obj
def to_numpy(obj):
    """Convert a TensorFlow tensor, PyTorch tensor, Numpy array or python list to a Numpy array."""
    if isinstance(obj, (dict, UserDict)):
        return {k: to_numpy(v) for k, v in obj.items()}
    elif isinstance(obj, (list, tuple)):
        return np.array(obj)
    elif is_tf_tensor(obj):
        return obj.numpy()
    elif is_torch_tensor(obj):
        return obj.detach().cpu().numpy()
    elif is_jax_tensor(obj):
        return np.asarray(obj)
    else:
        return obj
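# For example (a sketch; values are illustrative):
#   to_py_obj({"a": np.arange(3), "b": (np.float32(1.5),)})
#   -> {"a": [0, 1, 2], "b": [1.5]}
#   to_numpy([1, 2, 3]).shape
#   -> (3,)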
class ModelOutput(OrderedDict):
    """
    Base class for all model outputs as dataclass. Has a `__getitem__` that allows indexing by integer or slice
    (like a tuple) or strings (like a dictionary) that will ignore the `None` attributes.
    """
    def __post_init__(self):
        class_fields = fields(self)
        # Safety and consistency checks
        if not len(class_fields):
            raise ValueError(F"{self.__class__.__name__} has no fields.")
        if not all(field.default is None for field in class_fields[1:]):
            raise ValueError(F"{self.__class__.__name__} should not have more than one required field.")
        first_field = getattr(self, class_fields[0].name)
        other_fields_are_none = all(getattr(self, field.name) is None for field in class_fields[1:])
        if other_fields_are_none and not is_tensor(first_field):
            if isinstance(first_field, dict):
                iterator = first_field.items()
                first_field_iterator = True
            else:
                try:
                    iterator = iter(first_field)
                    first_field_iterator = True
                except TypeError:
                    first_field_iterator = False
            # if we provided an iterator as first field and the iterator is a (key, value) iterator
            # set the associated fields
            if first_field_iterator:
                for idx, element in enumerate(iterator):
                    if (
                        not isinstance(element, (list, tuple))
                        or not len(element) == 2
                        or not isinstance(element[0], str)
                    ):
                        if idx == 0:
                            # If we do not have an iterator of key/values, set it as attribute
                            self[class_fields[0].name] = first_field
                        else:
                            # If we have a mixed iterator, raise an error
                            raise ValueError(
                                F"Cannot set key/value for {element}. It needs to be a tuple (key, value).")
                        break
                    setattr(self, element[0], element[1])
                    if element[1] is not None:
                        self[element[0]] = element[1]
            elif first_field is not None:
                self[class_fields[0].name] = first_field
        else:
            for field in class_fields:
                v = getattr(self, field.name)
                if v is not None:
                    self[field.name] = v
    def __delitem__(self, *args, **kwargs):
        raise Exception(F"You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.")
    def setdefault(self, *args, **kwargs):
        raise Exception(F"You cannot use ``setdefault`` on a {self.__class__.__name__} instance.")
    def pop(self, *args, **kwargs):
        raise Exception(F"You cannot use ``pop`` on a {self.__class__.__name__} instance.")
    def update(self, *args, **kwargs):
        raise Exception(F"You cannot use ``update`` on a {self.__class__.__name__} instance.")
    def __getitem__(self, k):
        if isinstance(k, str):
            inner_dict = dict(self.items())
            return inner_dict[k]
        else:
            return self.to_tuple()[k]
    def __setattr__(self, name, value):
        if name in self.keys() and value is not None:
            # Don't call self.__setitem__ to avoid recursion errors
            super().__setitem__(name, value)
        super().__setattr__(name, value)
    def __setitem__(self, key, value):
        super().__setitem__(key, value)
        # Don't call self.__setattr__ to avoid recursion errors
        super().__setattr__(key, value)
    def to_tuple(self):
        """Convert self to a tuple containing all the attributes/keys that are not ``None``."""
        return tuple(self[k] for k in self.keys())
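# For example, a dataclass subclass behaves both like a tuple and a dict (sketch):
#
#   @dataclass
#   class SampleOutput(ModelOutput):
#       loss: Optional["torch.FloatTensor"] = None
#       logits: "torch.FloatTensor" = None
#
#   out = SampleOutput(logits=torch.ones(2, 2))
#   out["logits"] is out.logits  # True
#   out[0] is out.logits         # True; the None-valued `loss` is skipped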
class ExplicitEnum(str, Enum):
    """
    Enum with a more explicit error message for missing values.
    """
    @classmethod
    def _missing_(cls, value):
        raise ValueError(
            F"{value} is not a valid {cls.__name__}, please select one of {list(cls._value2member_map_.keys())}")
class PaddingStrategy(ExplicitEnum):
    """
    Possible values for the ``padding`` argument. Useful for tab-completion in an IDE.
    """
    LONGEST = '''longest'''
    MAX_LENGTH = '''max_length'''
    DO_NOT_PAD = '''do_not_pad'''
class TensorType(ExplicitEnum):
    """
    Possible values for the ``return_tensors`` argument. Useful for tab-completion in an IDE.
    """
    PYTORCH = '''pt'''
    TENSORFLOW = '''tf'''
    NUMPY = '''np'''
    JAX = '''jax'''
class ContextManagers:
    """
    Wrapper around `contextlib.ExitStack` that enters a collection of context managers.
    """
    def __init__(self, context_managers: List[ContextManager]):
        self.context_managers = context_managers
        self.stack = ExitStack()
    def __enter__(self):
        for context_manager in self.context_managers:
            self.stack.enter_context(context_manager)
    def __exit__(self, *args, **kwargs):
        self.stack.__exit__(*args, **kwargs)
def can_return_loss(model_class):
    """Check whether a given model can return loss (i.e. has a `return_loss` parameter defaulting to True)."""
    framework = infer_framework(model_class)
    if framework == "tf":
        signature = inspect.signature(model_class.call)  # TensorFlow models
    elif framework == "pt":
        signature = inspect.signature(model_class.forward)  # PyTorch models
    else:
        signature = inspect.signature(model_class.__call__)  # Flax models
    for p in signature.parameters:
        if p == "return_loss" and signature.parameters[p].default is True:
            return True
    return False
def find_labels(model_class):
    """Find the labels used by a given model, based on its signature."""
    model_name = model_class.__name__
    framework = infer_framework(model_class)
    if framework == "tf":
        signature = inspect.signature(model_class.call)  # TensorFlow models
    elif framework == "pt":
        signature = inspect.signature(model_class.forward)  # PyTorch models
    else:
        signature = inspect.signature(model_class.__call__)  # Flax models
    if "QuestionAnswering" in model_name:
        return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")]
    else:
        return [p for p in signature.parameters if "label" in p]
def flatten_dict(d: MutableMapping, parent_key: str = "", delimiter: str = "."):
    """Flatten a nested dict into a single-level dict, joining keys with `delimiter`."""
    def _flatten_dict(d, parent_key="", delimiter="."):
        for k, v in d.items():
            key = str(parent_key) + delimiter + str(k) if parent_key else k
            if v and isinstance(v, MutableMapping):
                yield from flatten_dict(v, key, delimiter=delimiter).items()
            else:
                yield key, v
    return dict(_flatten_dict(d, parent_key, delimiter))
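# For example (sketch):
#   flatten_dict({"a": 1, "b": {"c": 2, "d": {"e": 3}}})
#   -> {"a": 1, "b.c": 2, "b.d.e": 3}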
@contextmanager
def working_or_temp_dir(working_dir, use_temp_dir: bool = False):
    if use_temp_dir:
        with tempfile.TemporaryDirectory() as tmp_dir:
            yield tmp_dir
    else:
        yield working_dir
def transpose(array, axes=None):
    """Framework-agnostic version of `np.transpose` for numpy, torch, tensorflow and jax tensors."""
    if is_numpy_array(array):
        return np.transpose(array, axes=axes)
    elif is_torch_tensor(array):
        return array.T if axes is None else array.permute(*axes)
    elif is_tf_tensor(array):
        import tensorflow as tf
        return tf.transpose(array, perm=axes)
    elif is_jax_tensor(array):
        return jnp.transpose(array, axes=axes)
    else:
        raise ValueError(F"Type not supported for transpose: {type(array)}.")
def reshape(array, newshape):
    """Framework-agnostic version of `np.reshape` for numpy, torch, tensorflow and jax tensors."""
    if is_numpy_array(array):
        return np.reshape(array, newshape)
    elif is_torch_tensor(array):
        return array.reshape(*newshape)
    elif is_tf_tensor(array):
        import tensorflow as tf
        return tf.reshape(array, newshape)
    elif is_jax_tensor(array):
        return jnp.reshape(array, newshape)
    else:
        raise ValueError(F"Type not supported for reshape: {type(array)}.")
def squeeze(array, axis=None):
    """Framework-agnostic version of `np.squeeze` for numpy, torch, tensorflow and jax tensors."""
    if is_numpy_array(array):
        return np.squeeze(array, axis=axis)
    elif is_torch_tensor(array):
        return array.squeeze() if axis is None else array.squeeze(dim=axis)
    elif is_tf_tensor(array):
        import tensorflow as tf
        return tf.squeeze(array, axis=axis)
    elif is_jax_tensor(array):
        return jnp.squeeze(array, axis=axis)
    else:
        raise ValueError(F"Type not supported for squeeze: {type(array)}.")
def expand_dims(array, axis):
    """Framework-agnostic version of `np.expand_dims` for numpy, torch, tensorflow and jax tensors."""
    if is_numpy_array(array):
        return np.expand_dims(array, axis)
    elif is_torch_tensor(array):
        return array.unsqueeze(dim=axis)
    elif is_tf_tensor(array):
        import tensorflow as tf
        return tf.expand_dims(array, axis=axis)
    elif is_jax_tensor(array):
        return jnp.expand_dims(array, axis=axis)
    else:
        raise ValueError(F"Type not supported for expand_dims: {type(array)}.")
def tensor_size(array):
    """Framework-agnostic version of `np.size` for numpy, torch, tensorflow and jax tensors."""
    if is_numpy_array(array):
        return np.size(array)
    elif is_torch_tensor(array):
        return array.numel()
    elif is_tf_tensor(array):
        import tensorflow as tf
        return tf.size(array)
    elif is_jax_tensor(array):
        return array.size
    else:
        raise ValueError(F"Type not supported for tensor_size: {type(array)}.")
def add_model_info_to_auto_map(auto_map, repo_id):
    """Adds the information of the repo_id to a given auto map."""
    for key, value in auto_map.items():
        if isinstance(value, (tuple, list)):
            auto_map[key] = [F"{repo_id}--{v}" if (v is not None and """--""" not in v) else v for v in value]
        elif value is not None and "--" not in value:
            auto_map[key] = F"{repo_id}--{value}"
    return auto_map
def infer_framework(model_class):
    """
    Infers the framework of a given model class by inspecting its MRO, without using isinstance(),
    because we cannot guarantee that the relevant classes are imported or available.
    """
    for base_class in inspect.getmro(model_class):
        module = base_class.__module__
        name = base_class.__name__
        if module.startswith("""tensorflow""") or module.startswith("""keras""") or name == "TFPreTrainedModel":
            return "tf"
        elif module.startswith("""torch""") or name == "PreTrainedModel":
            return "pt"
        elif module.startswith("""flax""") or module.startswith("""jax""") or name == "FlaxPreTrainedModel":
            return "flax"
    else:
        raise TypeError(F"Could not infer framework from class {model_class}.")
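# A tiny numpy-only check of the framework-agnostic helpers above (the torch,
# tensorflow and jax branches behave analogously when those libraries are installed):
if __name__ == "__main__":  # pragma: no cover
    _a = np.arange(6).reshape(2, 3)
    assert transpose(_a).shape == (3, 2)
    assert reshape(_a, (3, 2)).shape == (3, 2)
    assert expand_dims(_a, 0).shape == (1, 2, 3)
    assert tensor_size(_a) == 6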
'''simple docstring'''
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    """
    Helper function parsing the command line options.
    """
    parser = ArgumentParser(
        description=(
            'PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes'
        ) )
    # Optional arguments for the launch helper
    parser.add_argument('--num_cores', type=int, default=1, help='Number of TPU cores to use (1 or 8).' )
    # positional
    parser.add_argument(
        'training_script', type=str, help=(
            'The full path to the single TPU training '
            'program/script to be launched in parallel, '
            'followed by all the arguments for the '
            'training script'
        ) )
    # rest from the training program
    parser.add_argument('training_script_args', nargs=REMAINDER )
    return parser.parse_args()
def main():
    args = parse_args()
    # Import training_script as a module.
    script_fpath = Path(args.training_script )
    sys.path.append(str(script_fpath.parent.resolve() ) )
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name )
    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ['--tpu_num_cores', str(args.num_cores )]
    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores )
if __name__ == "__main__":
    main()
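# Example invocation (script name and paths are placeholders):
#   python xla_spawn.py --num_cores 8 ./examples/run_glue.py --model_name_or_path bert-base-cased ...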
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    """configuration_pegasus_x""": ["""PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP""", """PegasusXConfig"""],
}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["""modeling_pegasus_x"""] = [
        """PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """PegasusXForConditionalGeneration""",
        """PegasusXModel""",
        """PegasusXPreTrainedModel""",
    ]
if TYPE_CHECKING:
    from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_pegasus_x import (
            PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
            PegasusXForConditionalGeneration,
            PegasusXModel,
            PegasusXPreTrainedModel,
        )
else:
    import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
import argparse
import math
import traceback
import dateutil.parser as date_parser
import requests
def extract_time_from_single_job(job):
    """Extract start/end times and duration (in minutes) from a single workflow job."""
    job_info = {}
    start = job['''started_at''']
    end = job['''completed_at''']
    start_datetime = date_parser.parse(start)
    end_datetime = date_parser.parse(end)
    duration_in_min = round((end_datetime - start_datetime).total_seconds() / 60.0)
    job_info['''started_at'''] = start
    job_info['''completed_at'''] = end
    job_info['''duration'''] = duration_in_min
    return job_info
def get_job_time(workflow_run_id, token=None):
    """Extract time info for all jobs in a GitHub Actions workflow run."""
    headers = None
    if token is not None:
        headers = {'''Accept''': '''application/vnd.github+json''', '''Authorization''': F"Bearer {token}"}
    url = F"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
    result = requests.get(url, headers=headers).json()
    job_time = {}
    try:
        job_time.update({job['''name''']: extract_time_from_single_job(job) for job in result['''jobs''']})
        pages_to_iterate_over = math.ceil((result['''total_count'''] - 100) / 100)
        for i in range(pages_to_iterate_over):
            result = requests.get(url + F"&page={i + 2}", headers=headers).json()
            job_time.update({job['''name''']: extract_time_from_single_job(job) for job in result['''jobs''']})
        return job_time
    except Exception:
        print(F"Unknown error, could not fetch links:\n{traceback.format_exc()}")
    return {}
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("""--workflow_run_id""", type=str, required=True, help="""A GitHub Actions workflow run id.""")
    args = parser.parse_args()
    job_time = get_job_time(args.workflow_run_id)
    job_time = dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True))
for k, v in job_time.items():
print(F'''{k}: {v["duration"]}''')
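# Example invocation (script name and run id are placeholders):
#   python get_github_job_time.py --workflow_run_id 1234567890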
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
BertConfig,
BertForMaskedLM,
BertTokenizer,
DistilBertConfig,
DistilBertForMaskedLM,
DistilBertTokenizer,
GPTaConfig,
GPTaLMHeadModel,
GPTaTokenizer,
RobertaConfig,
RobertaForMaskedLM,
RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed
MODEL_CLASSES = {
"""distilbert""": (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
"""roberta""": (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
"""bert""": (BertConfig, BertForMaskedLM, BertTokenizer),
"""gpt2""": (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer),
}
def sanity_checks(args):
    """
    A bunch of args sanity checks to perform before starting the distillation.
    """
assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
if args.mlm:
assert os.path.isfile(args.token_counts )
assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
else:
assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])
assert args.teacher_type == args.student_type or (
args.student_type == "distilbert" and args.teacher_type == "bert"
)
assert os.path.isfile(args.student_config )
if args.student_pretrained_weights is not None:
assert os.path.isfile(args.student_pretrained_weights )
if args.freeze_token_type_embds:
assert args.student_type in ["roberta"]
assert args.alpha_ce >= 0.0
assert args.alpha_mlm >= 0.0
assert args.alpha_clm >= 0.0
assert args.alpha_mse >= 0.0
assert args.alpha_cos >= 0.0
assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0
def freeze_pos_embeddings(student, args):
    """Freeze the positional embeddings of the student model."""
    if args.student_type == "roberta":
        student.roberta.embeddings.position_embeddings.weight.requires_grad = False
    elif args.student_type == "gpt2":
        student.transformer.wpe.weight.requires_grad = False
def freeze_token_type_embeddings(student, args):
    """Freeze the token type embeddings of the student model, if they exist."""
    if args.student_type == "roberta":
        student.roberta.embeddings.token_type_embeddings.weight.requires_grad = False
def main():
    parser = argparse.ArgumentParser(description='''Training''' )
    parser.add_argument('''--force''' , action='''store_true''' , help='''Overwrite dump_path if it already exists.''' )
    parser.add_argument(
        '''--dump_path''' , type=str , required=True , help='''The output directory (log, checkpoints, parameters, etc.)''' )
    parser.add_argument(
        '''--data_file''' , type=str , required=True , help='''The binarized file (tokenized + tokens_to_ids) and grouped by sequence.''' , )
    parser.add_argument(
        '''--student_type''' , type=str , choices=['''distilbert''', '''roberta''', '''gpt2'''] , required=True , help='''The student type (DistilBERT, RoBERTa).''' , )
    parser.add_argument('''--student_config''' , type=str , required=True , help='''Path to the student configuration.''' )
    parser.add_argument(
        '''--student_pretrained_weights''' , default=None , type=str , help='''Load student initialization checkpoint.''' )
    parser.add_argument(
        '''--teacher_type''' , choices=['''bert''', '''roberta''', '''gpt2'''] , required=True , help='''Teacher type (BERT, RoBERTa).''' )
    parser.add_argument('''--teacher_name''' , type=str , required=True , help='''The teacher model.''' )
    parser.add_argument('''--temperature''' , default=2.0 , type=float , help='''Temperature for the softmax temperature.''' )
    parser.add_argument(
        '''--alpha_ce''' , default=0.5 , type=float , help='''Linear weight for the distillation loss. Must be >=0.''' )
    parser.add_argument(
        '''--alpha_mlm''' , default=0.0 , type=float , help='''Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.''' , )
    parser.add_argument('''--alpha_clm''' , default=0.5 , type=float , help='''Linear weight for the CLM loss. Must be >=0.''' )
    parser.add_argument('''--alpha_mse''' , default=0.0 , type=float , help='''Linear weight of the MSE loss. Must be >=0.''' )
    parser.add_argument(
        '''--alpha_cos''' , default=0.0 , type=float , help='''Linear weight of the cosine embedding loss. Must be >=0.''' )
    parser.add_argument(
        '''--mlm''' , action='''store_true''' , help='''The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM.''' )
    parser.add_argument(
        '''--mlm_mask_prop''' , default=0.15 , type=float , help='''Proportion of tokens for which we need to make a prediction.''' , )
    parser.add_argument('''--word_mask''' , default=0.8 , type=float , help='''Proportion of tokens to mask out.''' )
    parser.add_argument('''--word_keep''' , default=0.1 , type=float , help='''Proportion of tokens to keep.''' )
    parser.add_argument('''--word_rand''' , default=0.1 , type=float , help='''Proportion of tokens to randomly replace.''' )
    parser.add_argument(
        '''--mlm_smoothing''' , default=0.7 , type=float , help='''Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).''' , )
    parser.add_argument('''--token_counts''' , type=str , help='''The token counts in the data_file for MLM.''' )
    parser.add_argument(
        '''--restrict_ce_to_mask''' , action='''store_true''' , help='''If true, compute the distillation loss only the [MLM] prediction distribution.''' , )
    parser.add_argument(
        '''--freeze_pos_embs''' , action='''store_true''' , help='''Freeze positional embeddings during distillation. For student_type in [\'roberta\', \'gpt2\'] only.''' , )
    parser.add_argument(
        '''--freeze_token_type_embds''' , action='''store_true''' , help='''Freeze token type embeddings during distillation if existent. For student_type in [\'roberta\'] only.''' , )
    parser.add_argument('''--n_epoch''' , type=int , default=3 , help='''Number of pass on the whole dataset.''' )
    parser.add_argument('''--batch_size''' , type=int , default=5 , help='''Batch size (for each process).''' )
    parser.add_argument(
        '''--group_by_size''' , action='''store_false''' , help='''If true, group sequences that have similar length into the same batch. Default is true.''' , )
    parser.add_argument(
        '''--gradient_accumulation_steps''' , type=int , default=50 , help='''Gradient accumulation for larger training batches.''' , )
    parser.add_argument('''--warmup_prop''' , default=0.05 , type=float , help='''Linear warmup proportion.''' )
    parser.add_argument('''--weight_decay''' , default=0.0 , type=float , help='''Weight decay if we apply some.''' )
    parser.add_argument('''--learning_rate''' , default=5E-4 , type=float , help='''The initial learning rate for Adam.''' )
    parser.add_argument('''--adam_epsilon''' , default=1E-6 , type=float , help='''Epsilon for Adam optimizer.''' )
    parser.add_argument('''--max_grad_norm''' , default=5.0 , type=float , help='''Max gradient norm.''' )
    parser.add_argument('''--initializer_range''' , default=0.02 , type=float , help='''Random initialization range.''' )
    parser.add_argument(
        '''--fp16''' , action='''store_true''' , help='''Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit''' , )
    parser.add_argument(
        '''--fp16_opt_level''' , type=str , default='''O1''' , help=(
            '''For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\'].'''
            '''See details at https://nvidia.github.io/apex/amp.html'''
        ) , )
    parser.add_argument('''--n_gpu''' , type=int , default=1 , help='''Number of GPUs in the node.''' )
    parser.add_argument('''--local_rank''' , type=int , default=-1 , help='''Distributed training - Local rank''' )
    parser.add_argument('''--seed''' , type=int , default=56 , help='''Random seed''' )
    parser.add_argument('''--log_interval''' , type=int , default=500 , help='''Tensorboard logging interval.''' )
    parser.add_argument('''--checkpoint_interval''' , type=int , default=4000 , help='''Checkpoint interval.''' )
    args = parser.parse_args()
    sanity_checks(args )
    # ARGS #
    init_gpu_params(args )
    set_seed(args )
    if args.is_master:
        if os.path.exists(args.dump_path ):
            if not args.force:
                raise ValueError(
                    F"Serialization dir {args.dump_path} already exists, but you have not specified whether to"
                    ''' overwrite it. Use `--force` if you want to overwrite it.''' )
            else:
                shutil.rmtree(args.dump_path )
        if not os.path.exists(args.dump_path ):
            os.makedirs(args.dump_path )
        logger.info(F"Experiment will be dumped and logged in {args.dump_path}" )
        # SAVE PARAMS #
        logger.info(F"Param: {args}" )
        with open(os.path.join(args.dump_path , '''parameters.json''' ) , '''w''' ) as f:
            json.dump(vars(args ) , f , indent=4 )
        git_log(args.dump_path )
    student_config_class , student_model_class , _ = MODEL_CLASSES[args.student_type]
    teacher_config_class , teacher_model_class , teacher_tokenizer_class = MODEL_CLASSES[args.teacher_type]
    # TOKENIZER #
    tokenizer = teacher_tokenizer_class.from_pretrained(args.teacher_name )
    special_tok_ids = {}
    for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
        idx = tokenizer.all_special_tokens.index(tok_symbol )
        special_tok_ids[tok_name] = tokenizer.all_special_ids[idx]
    logger.info(F"Special tokens {special_tok_ids}" )
    args.special_tok_ids = special_tok_ids
    args.max_model_input_size = tokenizer.max_model_input_sizes[args.teacher_name]
    # DATA LOADER #
    logger.info(F"Loading data from {args.data_file}" )
    with open(args.data_file , '''rb''' ) as fp:
        data = pickle.load(fp )
    if args.mlm:
        logger.info(F"Loading token counts from {args.token_counts} (already pre-computed)" )
        with open(args.token_counts , '''rb''' ) as fp:
            counts = pickle.load(fp )
        token_probs = np.maximum(counts , 1 ) ** -args.mlm_smoothing
        for idx in special_tok_ids.values():
            token_probs[idx] = 0.0  # do not predict special tokens
        token_probs = torch.from_numpy(token_probs )
    else:
        token_probs = None
    train_lm_seq_dataset = LmSeqsDataset(params=args , data=data )
    logger.info('''Data loader created.''' )
    # STUDENT #
    logger.info(F"Loading student config from {args.student_config}" )
    stu_architecture_config = student_config_class.from_pretrained(args.student_config )
    stu_architecture_config.output_hidden_states = True
    if args.student_pretrained_weights is not None:
        logger.info(F"Loading pretrained weights from {args.student_pretrained_weights}" )
        student = student_model_class.from_pretrained(args.student_pretrained_weights , config=stu_architecture_config )
    else:
        student = student_model_class(stu_architecture_config )
    if args.n_gpu > 0:
        student.to(F"cuda:{args.local_rank}" )
    logger.info('''Student loaded.''' )
    # TEACHER #
    teacher = teacher_model_class.from_pretrained(args.teacher_name , output_hidden_states=True )
    if args.n_gpu > 0:
        teacher.to(F"cuda:{args.local_rank}" )
    logger.info(F"Teacher loaded from {args.teacher_name}." )
    # FREEZING #
    if args.freeze_pos_embs:
        freeze_pos_embeddings(student , args )
    if args.freeze_token_type_embds:
        freeze_token_type_embeddings(student , args )
    # SANITY CHECKS #
    assert student.config.vocab_size == teacher.config.vocab_size
    assert student.config.hidden_size == teacher.config.hidden_size
    assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
    if args.mlm:
        assert token_probs.size(0 ) == stu_architecture_config.vocab_size
    # DISTILLER #
    torch.cuda.empty_cache()
    distiller = Distiller(
        params=args , dataset=train_lm_seq_dataset , token_probs=token_probs , student=student , teacher=teacher )
    distiller.train()
    logger.info('''Let\'s go get some drinks.''' )
if __name__ == "__main__":
    main()
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from ...utils.dataclasses import (
ComputeEnvironment,
DistributedType,
DynamoBackend,
PrecisionType,
SageMakerDistributedType,
)
from ..menu import BulletMenu
DYNAMO_BACKENDS = [
'EAGER',
'AOT_EAGER',
'INDUCTOR',
'NVFUSER',
'AOT_NVFUSER',
'AOT_CUDAGRAPHS',
'OFI',
'FX2TRT',
'ONNXRT',
'IPEX',
]
def _ask_field(input_text, convert_value=None, default=None, error_message=None):
    ask_again = True
    while ask_again:
        result = input(input_text)
        try:
            if default is not None and len(result) == 0:
                return default
            return convert_value(result) if convert_value is not None else result
        except Exception:
            if error_message is not None:
                print(error_message)
def _ask_options(input_text, options=[], convert_value=None, default_choice=0):
    menu = BulletMenu(input_text, options)
    result = menu.run(default_choice=default_choice)
    return convert_value(result) if convert_value is not None else result
def _convert_compute_environment(value):
    value = int(value)
    return ComputeEnvironment(['''LOCAL_MACHINE''', '''AMAZON_SAGEMAKER'''][value])
def _convert_distributed_mode(value):
    value = int(value)
    return DistributedType(['''NO''', '''MULTI_CPU''', '''MULTI_XPU''', '''MULTI_GPU''', '''MULTI_NPU''', '''TPU'''][value])
def _convert_dynamo_backend(value):
    value = int(value)
    return DynamoBackend(DYNAMO_BACKENDS[value]).value
def _convert_mixed_precision(value):
    value = int(value)
    return PrecisionType(['''no''', '''fp16''', '''bf16''', '''fp8'''][value])
def _convert_sagemaker_distributed_mode(value):
    value = int(value)
    return SageMakerDistributedType(['''NO''', '''DATA_PARALLEL''', '''MODEL_PARALLEL'''][value])
def _convert_yes_no_to_bool(value):
    return {"yes": True, "no": False}[value.lower()]
class SubcommandHelpFormatter(argparse.RawDescriptionHelpFormatter):
    """
    A custom formatter that removes the `<command> [<args>]` usage prefix from subcommand help messages.
    """
    def _format_usage(self, usage, actions, groups, prefix):
        usage = super()._format_usage(usage, actions, groups, prefix)
        usage = usage.replace('''<command> [<args>] ''', '''''')
        return usage
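# A sketch of how the converters compose with the prompt helpers above:
#   mixed_precision = _ask_options(
#       "Do you wish to use mixed precision?",
#       ["no", "fp16", "bf16", "fp8"],
#       _convert_mixed_precision,
#   )
#   use_cpu = _ask_field("Run on CPU only? [yes/NO]: ", _convert_yes_no_to_bool, default=False)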
import argparse
import torch
from torch import nn
from transformers import MBartConfig, MBartForConditionalGeneration
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        '''encoder.version''',
        '''decoder.version''',
        '''model.encoder.version''',
        '''model.decoder.version''',
        '''_float_tensor''',
        '''decoder.output_projection.weight''',
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def convert_fairseq_mbart_checkpoint_from_disk(checkpoint_path, hf_config_path='''facebook/mbart-large-en-ro''', finetuned=False, mbart_50=False):
    state_dict = torch.load(checkpoint_path, map_location='''cpu''')['''model''']
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict['''encoder.embed_tokens.weight'''].shape[0]
    mbart_config = MBartConfig.from_pretrained(hf_config_path, vocab_size=vocab_size)
    if mbart_50 and finetuned:
        mbart_config.activation_function = '''relu'''
    state_dict['''shared.weight'''] = state_dict['''decoder.embed_tokens.weight''']
    model = MBartForConditionalGeneration(mbart_config)
    model.model.load_state_dict(state_dict)
    if finetuned:
        model.lm_head = make_linear_from_emb(model.model.shared)
    return model
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        'fairseq_path', type=str, help='bart.large, bart.large.cnn or a path to a model.pt on local filesystem.'
    )
    parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    parser.add_argument(
        '--hf_config',
        default='facebook/mbart-large-cc25',
        type=str,
        help='Which huggingface architecture to use: mbart-large',
    )
    parser.add_argument('--mbart_50', action='store_true', help='whether the model is an mBART-50 checkpoint')
    parser.add_argument('--finetuned', action='store_true', help='whether the model is a fine-tuned checkpoint')
    args = parser.parse_args()
    model = convert_fairseq_mbart_checkpoint_from_disk(
        args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_50=args.mbart_50
    )
    model.save_pretrained(args.pytorch_dump_folder_path)
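# Example invocation (script name and paths are placeholders):
#   python convert_mbart_original_checkpoint_to_pytorch.py ./model.pt ./mbart-hf \
#       --hf_config facebook/mbart-large-cc25 --finetuned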
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ConvNextConfig, SegformerImageProcessor, UperNetConfig, UperNetForSemanticSegmentation
def get_upernet_config(model_name):
    auxiliary_in_channels = 384
    if "tiny" in model_name:
        depths = [3, 3, 9, 3]
        hidden_sizes = [96, 192, 384, 768]
    if "small" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [96, 192, 384, 768]
    if "base" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [128, 256, 512, 1024]
        auxiliary_in_channels = 512
    if "large" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [192, 384, 768, 1536]
        auxiliary_in_channels = 768
    if "xlarge" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [256, 512, 1024, 2048]
        auxiliary_in_channels = 1024
    # set label information
    num_labels = 150
    repo_id = "huggingface/label-files"
    filename = "ade20k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    backbone_config = ConvNextConfig(
        depths=depths, hidden_sizes=hidden_sizes, out_features=["stage1", "stage2", "stage3", "stage4"])
    config = UperNetConfig(
        backbone_config=backbone_config, auxiliary_in_channels=auxiliary_in_channels, num_labels=num_labels, id2label=id2label, label2id=label2id, )
    return config
def create_rename_keys(config):
    rename_keys = []
# fmt: off
# stem
rename_keys.append(("backbone.downsample_layers.0.0.weight", "backbone.embeddings.patch_embeddings.weight") )
rename_keys.append(("backbone.downsample_layers.0.0.bias", "backbone.embeddings.patch_embeddings.bias") )
rename_keys.append(("backbone.downsample_layers.0.1.weight", "backbone.embeddings.layernorm.weight") )
rename_keys.append(("backbone.downsample_layers.0.1.bias", "backbone.embeddings.layernorm.bias") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f'''backbone.stages.{i}.{j}.gamma''', f'''backbone.encoder.stages.{i}.layers.{j}.layer_scale_parameter''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.depthwise_conv.weight''', f'''backbone.encoder.stages.{i}.layers.{j}.dwconv.weight''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.depthwise_conv.bias''', f'''backbone.encoder.stages.{i}.layers.{j}.dwconv.bias''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.norm.weight''', f'''backbone.encoder.stages.{i}.layers.{j}.layernorm.weight''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.norm.bias''', f'''backbone.encoder.stages.{i}.layers.{j}.layernorm.bias''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.pointwise_conv1.weight''', f'''backbone.encoder.stages.{i}.layers.{j}.pwconv1.weight''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.pointwise_conv1.bias''', f'''backbone.encoder.stages.{i}.layers.{j}.pwconv1.bias''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.pointwise_conv2.weight''', f'''backbone.encoder.stages.{i}.layers.{j}.pwconv2.weight''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.pointwise_conv2.bias''', f'''backbone.encoder.stages.{i}.layers.{j}.pwconv2.bias''') )
if i > 0:
rename_keys.append((f'''backbone.downsample_layers.{i}.0.weight''', f'''backbone.encoder.stages.{i}.downsampling_layer.0.weight''') )
rename_keys.append((f'''backbone.downsample_layers.{i}.0.bias''', f'''backbone.encoder.stages.{i}.downsampling_layer.0.bias''') )
rename_keys.append((f'''backbone.downsample_layers.{i}.1.weight''', f'''backbone.encoder.stages.{i}.downsampling_layer.1.weight''') )
rename_keys.append((f'''backbone.downsample_layers.{i}.1.bias''', f'''backbone.encoder.stages.{i}.downsampling_layer.1.bias''') )
rename_keys.append((f'''backbone.norm{i}.weight''', f'''backbone.hidden_states_norms.stage{i+1}.weight''') )
rename_keys.append((f'''backbone.norm{i}.bias''', f'''backbone.hidden_states_norms.stage{i+1}.bias''') )
# decode head
rename_keys.extend(
[
("decode_head.conv_seg.weight", "decode_head.classifier.weight"),
("decode_head.conv_seg.bias", "decode_head.classifier.bias"),
("auxiliary_head.conv_seg.weight", "auxiliary_head.classifier.weight"),
("auxiliary_head.conv_seg.bias", "auxiliary_head.classifier.bias"),
] )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def convert_upernet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    model_name_to_url = {
        "upernet-convnext-tiny": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_tiny_fp16_512x512_160k_ade20k/upernet_convnext_tiny_fp16_512x512_160k_ade20k_20220227_124553-cad485de.pth",
        "upernet-convnext-small": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_small_fp16_512x512_160k_ade20k/upernet_convnext_small_fp16_512x512_160k_ade20k_20220227_131208-1b1e394f.pth",
        "upernet-convnext-base": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_base_fp16_512x512_160k_ade20k/upernet_convnext_base_fp16_512x512_160k_ade20k_20220227_181227-02a24fc6.pth",
        "upernet-convnext-large": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_large_fp16_640x640_160k_ade20k/upernet_convnext_large_fp16_640x640_160k_ade20k_20220226_040532-e57aa54d.pth",
        "upernet-convnext-xlarge": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_xlarge_fp16_640x640_160k_ade20k/upernet_convnext_xlarge_fp16_640x640_160k_ade20k_20220226_080344-95fc38c2.pth",
    }
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["state_dict"]
    config = get_upernet_config(model_name)
    model = UperNetForSemanticSegmentation(config)
    model.eval()
    # replace "bn" => "batch_norm"
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        if "bn" in key:
            key = key.replace("bn", "batch_norm")
        state_dict[key] = val
    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    model.load_state_dict(state_dict)
    # verify on image
    url = "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    processor = SegformerImageProcessor()
    pixel_values = processor(image, return_tensors="pt").pixel_values
    with torch.no_grad():
        outputs = model(pixel_values)
    if model_name == "upernet-convnext-tiny":
        expected_slice = torch.tensor(
            [[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]])
    elif model_name == "upernet-convnext-small":
        expected_slice = torch.tensor(
            [[-8.8236, -8.8236, -8.6771], [-8.8236, -8.8236, -8.6771], [-8.7638, -8.7638, -8.6240]])
    elif model_name == "upernet-convnext-base":
        expected_slice = torch.tensor(
            [[-8.8558, -8.8558, -8.6905], [-8.8558, -8.8558, -8.6905], [-8.7669, -8.7669, -8.6021]])
    elif model_name == "upernet-convnext-large":
        expected_slice = torch.tensor(
            [[-8.6660, -8.6660, -8.6210], [-8.6660, -8.6660, -8.6210], [-8.6310, -8.6310, -8.5964]])
    elif model_name == "upernet-convnext-xlarge":
        expected_slice = torch.tensor(
            [[-8.4980, -8.4980, -8.3977], [-8.4980, -8.4980, -8.3977], [-8.4379, -8.4379, -8.3412]])
    print("Logits:", outputs.logits[0, 0, :3, :3])
    assert torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4)
    print("Looks ok!")
    if pytorch_dump_folder_path is not None:
        print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''')
        model.save_pretrained(pytorch_dump_folder_path)
        print(f'''Saving processor to {pytorch_dump_folder_path}''')
        processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        print(f'''Pushing model and processor for {model_name} to hub''')
        model.push_to_hub(f'''openmmlab/{model_name}''')
        processor.push_to_hub(f'''openmmlab/{model_name}''')
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""upernet-convnext-tiny""",
type=str,
choices=[F'upernet-convnext-{size}' for size in ["""tiny""", """small""", """base""", """large""", """xlarge"""]],
help="""Name of the ConvNext UperNet model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
    args = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
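# Example invocation (script name is a placeholder):
#   python convert_upernet_convnext_to_pytorch.py --model_name upernet-convnext-tiny \
#       --pytorch_dump_folder_path ./upernet-convnext-tiny --push_to_hub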
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def convert_pytorch_checkpoint_to_tf(model: BertModel, ckpt_dir: str, model_name: str):
    """
    Convert a HuggingFace PyTorch BERT model into a TensorFlow 1.x checkpoint.
    """
    tensors_to_transpose = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value")
    var_map = (
        ("layer.", "layer_"),
        ("word_embeddings.weight", "word_embeddings"),
        ("position_embeddings.weight", "position_embeddings"),
        ("token_type_embeddings.weight", "token_type_embeddings"),
        (".", "/"),
        ("LayerNorm/weight", "LayerNorm/gamma"),
        ("LayerNorm/bias", "LayerNorm/beta"),
        ("weight", "kernel"),
    )
    if not os.path.isdir(ckpt_dir):
        os.makedirs(ckpt_dir)
    state_dict = model.state_dict()
    def to_tf_var_name(name):
        for patt, repl in iter(var_map):
            name = name.replace(patt, repl)
        return f'''bert/{name}'''
    def create_tf_var(tensor, name, session):
        tf_dtype = tf.dtypes.as_dtype(tensor.dtype)
        tf_var = tf.get_variable(dtype=tf_dtype, shape=tensor.shape, name=name, initializer=tf.zeros_initializer())
        session.run(tf.variables_initializer([tf_var]))
        session.run(tf_var)
        return tf_var
    tf.reset_default_graph()
    with tf.Session() as session:
        for var_name in state_dict:
            tf_name = to_tf_var_name(var_name)
            torch_tensor = state_dict[var_name].numpy()
            if any(x in var_name for x in tensors_to_transpose):
                torch_tensor = torch_tensor.T
            tf_var = create_tf_var(tensor=torch_tensor, name=tf_name, session=session)
            tf.keras.backend.set_value(tf_var, torch_tensor)
            tf_weight = session.run(tf_var)
            print(f'''Successfully created {tf_name}: {np.allclose(tf_weight, torch_tensor)}''')
        saver = tf.train.Saver(tf.trainable_variables())
        saver.save(session, os.path.join(ckpt_dir, model_name.replace("-", "_") + ".ckpt"))
def main(raw_args=None):
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name", type=str, required=True, help="model name e.g. bert-base-uncased")
    parser.add_argument(
        "--cache_dir", type=str, default=None, required=False, help="Directory containing pytorch model")
    parser.add_argument("--pytorch_model_path", type=str, required=True, help="/path/to/<pytorch-model-name>.bin")
    parser.add_argument("--tf_cache_dir", type=str, required=True, help="Directory in which to save tensorflow model")
    args = parser.parse_args(raw_args)
    model = BertModel.from_pretrained(
        pretrained_model_name_or_path=args.model_name, state_dict=torch.load(args.pytorch_model_path), cache_dir=args.cache_dir, )
    convert_pytorch_checkpoint_to_tf(model=model, ckpt_dir=args.tf_cache_dir, model_name=args.model_name)
if __name__ == "__main__":
main()
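# Example invocation (script name and paths are placeholders):
#   python convert_bert_pytorch_checkpoint_to_original_tf.py --model_name bert-base-uncased \
#       --pytorch_model_path ./pytorch_model.bin --tf_cache_dir ./tf_ckpt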
from torch import nn
def get_activation(act_fn: str) -> nn.Module:
    """Return an activation module for the given name."""
    if act_fn in ["swish", "silu"]:
        return nn.SiLU()
    elif act_fn == "mish":
        return nn.Mish()
    elif act_fn == "gelu":
        return nn.GELU()
    else:
        raise ValueError(F"""Unsupported activation function: {act_fn}""" )
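# A quick usage check (torch is imported lazily so the module stays light):
if __name__ == "__main__":  # pragma: no cover
    import torch
    print(get_activation("silu")(torch.randn(4)))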
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionAttendAndExcitePipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_numpy, skip_mps, slow
from diffusers.utils.testing_utils import require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
torch.backends.cuda.matmul.allow_tf32 = False
@skip_mps
class StableDiffusionAttendAndExcitePipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionAttendAndExcitePipeline
    test_attention_slicing = False
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS.union({'token_indices'})
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        torch.use_deterministic_algorithms(True)
    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        torch.use_deterministic_algorithms(False)
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64), layers_per_block=1, sample_size=32, in_channels=4, out_channels=4, down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D"""), up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D"""), cross_attention_dim=32, attention_head_dim=(2, 4), use_linear_projection=True, )
        scheduler = DDIMScheduler(
            beta_start=0.0_00_85, beta_end=0.0_12, beta_schedule="""scaled_linear""", clip_sample=False, set_alpha_to_one=False, )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""], up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""], latent_channels=4, sample_size=128, )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act="""gelu""", projection_dim=512, )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""")
        components = {
            """unet""": unet,
            """scheduler""": scheduler,
            """vae""": vae,
            """text_encoder""": text_encoder,
            """tokenizer""": tokenizer,
            """safety_checker""": None,
            """feature_extractor""": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("""mps"""):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            """prompt""": """a cat and a frog""",
            """token_indices""": [2, 5],
            """generator""": generator,
            """num_inference_steps""": 1,
            """guidance_scale""": 6.0,
            """output_type""": """numpy""",
            """max_iter_to_alter""": 2,
            """thresholds""": {0: 0.7},
        }
        return inputs
    def test_inference(self):
        device = """cpu"""
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        self.assertEqual(image.shape, (1, 64, 64, 3))
        expected_slice = np.array(
            [0.63_90_53_64, 0.62_89_73_07, 0.48_59_90_17, 0.5_13_36_24, 0.5_55_00_48, 0.45_76_95_16, 0.50_32_69_73, 0.5_02_31_39, 0.45_38_44_96])
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
    def test_cpu_offload_forward_pass(self):
        super().test_cpu_offload_forward_pass(expected_max_diff=5e-4)
    def test_inference_batch_consistent(self):
        # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
        self._test_inference_batch_consistent(batch_sizes=[1, 2])
    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(batch_size=2, expected_max_diff=7e-4)
    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)
    def test_pt_np_pil_outputs_equivalent(self):
        super().test_pt_np_pil_outputs_equivalent(expected_max_diff=5e-4)
    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=5e-4)
    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=4e-4)
@require_torch_gpu
@slow
class StableDiffusionAttendAndExcitePipelineIntegrationTests(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        torch.use_deterministic_algorithms(True)
    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        torch.use_deterministic_algorithms(False)
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_attend_and_excite_fp16(self):
        generator = torch.manual_seed(51)
        pipe = StableDiffusionAttendAndExcitePipeline.from_pretrained(
            """CompVis/stable-diffusion-v1-4""", safety_checker=None, torch_dtype=torch.float16)
        pipe.to("""cuda""")
        prompt = """a painting of an elephant with glasses"""
        token_indices = [5, 7]
        image = pipe(
            prompt=prompt, token_indices=token_indices, guidance_scale=7.5, generator=generator, num_inference_steps=5, max_iter_to_alter=5, output_type="""numpy""", ).images[0]
        expected_image = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/attend-and-excite/elephant_glasses.npy""")
        assert np.abs((expected_image - image).max()) < 5e-1
"""simple docstring"""
import unittest
from transformers import AutoConfig, AutoTokenizer, BertConfig, RobertaConfig, TensorType, is_flax_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, slow
if is_flax_available():
import jax
from transformers.models.auto.modeling_flax_auto import FlaxAutoModel
from transformers.models.bert.modeling_flax_bert import FlaxBertModel
from transformers.models.roberta.modeling_flax_roberta import FlaxRobertaModel
@require_flax
class FlaxAutoModelTest(unittest.TestCase):
    @slow
    def test_bert_from_pretrained(self):
        for model_name in ["bert-base-cased", "bert-large-uncased"]:
            with self.subTest(model_name):
                config = AutoConfig.from_pretrained(model_name)
                self.assertIsNotNone(config)
                self.assertIsInstance(config, BertConfig)

                model = FlaxAutoModel.from_pretrained(model_name)
                self.assertIsNotNone(model)
                self.assertIsInstance(model, FlaxBertModel)

    @slow
    def test_roberta_from_pretrained(self):
        for model_name in ["roberta-base", "roberta-large"]:
            with self.subTest(model_name):
                config = AutoConfig.from_pretrained(model_name)
                self.assertIsNotNone(config)
                self.assertIsInstance(config, RobertaConfig)

                model = FlaxAutoModel.from_pretrained(model_name)
                self.assertIsNotNone(model)
                self.assertIsInstance(model, FlaxRobertaModel)

    @slow
    def test_bert_jax_jit(self):
        for model_name in ["bert-base-cased", "bert-large-uncased"]:
            tokenizer = AutoTokenizer.from_pretrained(model_name)
            model = FlaxBertModel.from_pretrained(model_name)
            tokens = tokenizer("Do you support jax jitted function?", return_tensors=TensorType.JAX)

            @jax.jit
            def eval(**kwargs):
                return model(**kwargs)

            eval(**tokens).block_until_ready()

    @slow
    def test_roberta_jax_jit(self):
        for model_name in ["roberta-base", "roberta-large"]:
            tokenizer = AutoTokenizer.from_pretrained(model_name)
            model = FlaxRobertaModel.from_pretrained(model_name)
            tokens = tokenizer("Do you support jax jitted function?", return_tensors=TensorType.JAX)

            @jax.jit
            def eval(**kwargs):
                return model(**kwargs)

            eval(**tokens).block_until_ready()

    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"
        ):
            _ = FlaxAutoModel.from_pretrained("bert-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = FlaxAutoModel.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_model_file_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/config-no-model does not appear to have a file named flax_model.msgpack",
        ):
            _ = FlaxAutoModel.from_pretrained("hf-internal-testing/config-no-model")

    def test_model_from_pt_suggestion(self):
        with self.assertRaisesRegex(EnvironmentError, "Use `from_pt=True` to load this model"):
            _ = FlaxAutoModel.from_pretrained("hf-internal-testing/tiny-bert-pt-only")
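# Note: JAX dispatches work asynchronously, so the jitted calls above end with
# .block_until_ready() -- this forces tracing, compilation and execution to
# finish inside the test instead of at some later synchronization point.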
"""simple docstring"""
def UpperCamelCase_ ( lowerCamelCase : float ) -> float:
"""simple docstring"""
return 10 - x * x
def UpperCamelCase_ ( lowerCamelCase : float , lowerCamelCase : float ) -> float:
"""simple docstring"""
if equation(lowerCamelCase ) * equation(lowerCamelCase ) >= 0:
raise ValueError('''Wrong space!''' )
__magic_name__ : int = a
while (b - a) >= 0.0_1:
# Find middle point
__magic_name__ : str = (a + b) / 2
# Check if middle point is root
if equation(lowerCamelCase ) == 0.0:
break
# Decide the side to repeat the steps
if equation(lowerCamelCase ) * equation(lowerCamelCase ) < 0:
__magic_name__ : Union[str, Any] = c
else:
__magic_name__ : List[str] = c
return c
if __name__ == "__main__":
import doctest
doctest.testmod()
print(bisection(-2, 5))
print(bisection(0, 6))
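    # Rough iteration-count check for bisection(-2, 5) (hand calculation,
    # assuming the 0.01 stopping width used above): the bracket halves each
    # pass, so it takes about ceil(log2(7 / 0.01)) halvings.
    import math

    print(math.ceil(math.log2((5 - (-2)) / 0.01)))  # -> 10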
import math
import time
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class QuestionAnsweringTrainer(Trainer):
    def __init__(self, *args, eval_examples=None, post_process_function=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function

    def evaluate(self, eval_dataset=None, eval_examples=None, ignore_keys=None, metric_key_prefix: str = "eval"):
        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        start_time = time.time()
        try:
            output = eval_loop(
                eval_dataloader,
                description="Evaluation",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )
        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node write the results by default
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output.predictions)
            metrics = self.compute_metrics(eval_preds)

            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f"{metric_key_prefix}_"):
                    metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
            metrics.update(output.metrics)
        else:
            metrics = output.metrics

        if self.args.should_log:
            # Only the main node log the results by default
            self.log(metrics)

        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())

        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics

    def predict(self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test"):
        predict_dataloader = self.get_test_dataloader(predict_dataset)

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        start_time = time.time()
        try:
            output = eval_loop(
                predict_dataloader,
                description="Prediction",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )

        if self.post_process_function is None or self.compute_metrics is None:
            return output

        predictions = self.post_process_function(predict_examples, predict_dataset, output.predictions, "predict")
        metrics = self.compute_metrics(predictions)

        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"{metric_key_prefix}_"):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
        metrics.update(output.metrics)
        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)
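# Design note: both evaluate() and predict() above temporarily set
# self.compute_metrics to None so that the shared evaluation loop only gathers
# raw predictions; the metrics are computed afterwards, once the
# post-processing function has turned start/end logits into answer texts.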
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class LayoutLMv3Processor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv3ImageProcessor"
    tokenizer_class = ("LayoutLMv3Tokenizer", "LayoutLMv3TokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None,
        boxes: Union[List[List[int]], List[List[List[int]]]] = None,
        word_labels: Optional[Union[List[int], List[List[int]]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True."
            )

        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True."
            )

        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)

        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]

        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["words"],
            text_pair=text_pair if text_pair is not None else None,
            boxes=boxes if boxes is not None else features["boxes"],
            word_labels=word_labels,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )

        # add pixel values
        images = features.pop("pixel_values")
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images, encoded_inputs["overflow_to_sample_mapping"])
        encoded_inputs["pixel_values"] = images

        return encoded_inputs

    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])

        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                f" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}"
            )

        return images_with_overflow

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "bbox", "attention_mask", "pixel_values"]

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
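# Hypothetical usage sketch (checkpoint name assumed, not taken from this file):
#
#     processor = LayoutLMv3Processor.from_pretrained("microsoft/layoutlmv3-base")
#     encoding = processor(images=image, return_tensors="pt")
#
# With apply_ocr=True (the image processor default) the words and boxes come
# from the built-in OCR, and the returned BatchEncoding carries input_ids,
# bbox, attention_mask and pixel_values.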
def split(string: str, separator: str = " ") -> list:
    """Split `string` on `separator` without using str.split."""
    split_words = []
    last_index = 0
    for index, char in enumerate(string):
        if char == separator:
            split_words.append(string[last_index:index])
            last_index = index + 1
        elif index + 1 == len(string):
            split_words.append(string[last_index : index + 1])
    return split_words
if __name__ == "__main__":
from doctest import testmod
testmod()
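    # Quick examples of the manual splitter above (note that, as written, a
    # trailing separator is dropped rather than yielding an empty field):
    print(split("apple#banana#cherry", "#"))  # ['apple', 'banana', 'cherry']
    print(split("Hello there"))  # ['Hello', 'there']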
from collections import Counter
from timeit import timeit


def can_string_be_rearranged_as_palindrome_counter(input_str: str = "") -> bool:
    """Counter-based check: at most one character may have an odd count."""
    return sum(c % 2 for c in Counter(input_str.replace(" ", "").lower()).values()) < 2


def can_string_be_rearranged_as_palindrome(input_str: str = "") -> bool:
    """Manual frequency-dict version of the same check."""
    if len(input_str) == 0:
        return True
    lower_case_input_str = input_str.replace(" ", "").lower()
    # character_freq_dict: stores the frequency of every character in the input string
    character_freq_dict: dict[str, int] = {}
    for character in lower_case_input_str:
        character_freq_dict[character] = character_freq_dict.get(character, 0) + 1

    odd_char = 0
    for character_count in character_freq_dict.values():
        if character_count % 2:
            odd_char += 1
        if odd_char > 1:
            return False
    return True


def benchmark(check_str: str = "") -> None:
    """Time the two implementations against each other on `check_str`."""
    print("\nFor string = ", check_str, ":")
    print(
        "> can_string_be_rearranged_as_palindrome_counter()",
        "\tans =",
        can_string_be_rearranged_as_palindrome_counter(check_str),
        "\ttime =",
        timeit(
            "z.can_string_be_rearranged_as_palindrome_counter(z.check_str)",
            setup="import __main__ as z",
        ),
        "seconds",
    )
    print(
        "> can_string_be_rearranged_as_palindrome()",
        "\tans =",
        can_string_be_rearranged_as_palindrome(check_str),
        "\ttime =",
        timeit(
            "z.can_string_be_rearranged_as_palindrome(z.check_str)",
            setup="import __main__ as z",
        ),
        "seconds",
    )


if __name__ == "__main__":
    check_str = input(
        "Enter string to determine if it can be rearranged as a palindrome or not: "
    ).strip()
    benchmark(check_str)
    status = can_string_be_rearranged_as_palindrome_counter(check_str)
    print(f"{check_str} can {'' if status else 'not '}be rearranged as a palindrome")
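    # A few hand-checked examples (both implementations above agree):
    #   "racecar" -> True   (already a palindrome)
    #   "aab"     -> True   (rearranges to "aba")
    #   "abc"     -> False  (three characters occur an odd number of times)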
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile

from transformers import M2M100Tokenizer, is_torch_available
from transformers.testing_utils import (
    get_tests_dir,
    nested_simplify,
    require_sentencepiece,
    require_tokenizers,
    require_torch,
    slow,
)
from transformers.utils import is_sentencepiece_available


if is_sentencepiece_available():
    from transformers.models.m2m_100.tokenization_m2m_100 import VOCAB_FILES_NAMES, save_json

from ...test_tokenization_common import TokenizerTesterMixin


if is_sentencepiece_available():
    SAMPLE_SP = get_tests_dir("fixtures/test_sentencepiece.model")

if is_torch_available():
    from transformers.models.m2m_100.modeling_m2m_100 import shift_tokens_right

EN_CODE = 128022
FR_CODE = 128028
@require_sentencepiece
class M2M100TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = M2M100Tokenizer
    test_rust_tokenizer = False
    test_seq2seq = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        vocab = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        save_dir = Path(self.tmpdirname)
        save_json(vocab_tokens, save_dir / VOCAB_FILES_NAMES["vocab_file"])
        if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES["spm_file"])

        tokenizer = M2M100Tokenizer.from_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return M2M100Tokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return (
            "This is a test",
            "This is a test",
        )

    def test_convert_token_and_id(self):
        token = "</s>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        tokenizer = self.get_tokenizer()
        vocab_keys = list(tokenizer.get_vocab().keys())

        self.assertEqual(vocab_keys[0], "</s>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "<s>")
        self.assertEqual(len(vocab_keys), tokenizer.vocab_size + len(tokenizer.get_added_vocab()))

    @unittest.skip("Skip this test while all models are still to be uploaded.")
    def test_pretrained_model_lists(self):
        pass

    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [2, 3, 4, 5, 6],
        )

        back_tokens = tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6])
        self.assertListEqual(back_tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        text = tokenizer.convert_tokens_to_string(tokens)
        self.assertEqual(text, "This is a test")
@slow
    def test_tokenizer_integration(self):
        # fmt: off
SCREAMING_SNAKE_CASE : Optional[int] = {'input_ids': [[128_022, 110_108, 397, 11, 38_272, 2_247, 124_811, 285, 18_105, 1_586, 207, 7, 39_534, 4_428, 397, 1_019, 18_105, 1_586, 207, 7, 41_337, 16_786, 241, 7, 20_214, 17, 125_690, 10_398, 7, 44_378, 58_069, 68_342, 7_798, 7_343, 11, 299, 33_310, 4, 158, 37_350, 94_077, 4_569, 299, 33_310, 90, 4, 52_840, 290, 4, 31_270, 112, 299, 682, 4, 52_840, 39_953, 14_079, 193, 52_519, 90_894, 17_894, 120_697, 11, 40_445, 551, 17, 1_019, 52_519, 90_894, 17_756, 963, 11, 40_445, 480, 17, 9_792, 1_120, 5_173, 1_393, 6_240, 16_786, 241, 120_996, 28, 1_245, 1_393, 118_240, 11_123, 1_019, 93_612, 2_691, 10_618, 98_058, 120_409, 1_928, 279, 4, 40_683, 367, 178, 207, 1_019, 103, 103_121, 506, 65_296, 5, 2], [128_022, 21_217, 367, 117, 125_450, 128, 719, 7, 7_308, 40, 93_612, 12_669, 1_116, 16_704, 71, 17_785, 3_699, 15_592, 35, 144, 9_584, 241, 11_943, 713, 950, 799, 2_247, 88_427, 150, 149, 118_813, 120_706, 1_019, 106_906, 81_518, 28, 1_224, 22_799, 397, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [128_022, 1_658, 123_311, 5_155, 5_578, 4_722, 279, 14_947, 2_366, 1_120, 1_197, 14, 1_348, 9_232, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=SCREAMING_SNAKE_CASE,  # the expected ids/masks defined in the fmt: off block above
            model_name="facebook/m2m100_418M",
            revision="c168bae485c864188cf9aa0e4108b0b6934dc91e",
        )
@require_torch
@require_sentencepiece
@require_tokenizers
class M2M100TokenizerIntegrationTest(unittest.TestCase):
    checkpoint_name = "facebook/m2m100_418M"
    src_text = [
        "In my opinion, there are two levels of response from the French government.",
        "NSA Affair Emphasizes Complete Lack of Debate on Intelligence",
    ]
    tgt_text = [
        "Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.",
        "L'affaire NSA souligne l'absence totale de débat sur le renseignement",
    ]

    # fmt: off
    expected_src_tokens = [EN_CODE, 593, 1949, 115781, 4, 71586, 4234, 60633, 126233, 432, 123808, 15592, 1197, 117132, 120618, 5, 2]
    # fmt: on

    @classmethod
    def setUpClass(cls):
        cls.tokenizer: M2M100Tokenizer = M2M100Tokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="en", tgt_lang="fr"
        )
        cls.pad_token_id = 1
        return cls

    def test_language_codes(self):
        self.assertEqual(self.tokenizer.get_lang_id("ar"), 128006)
        self.assertEqual(self.tokenizer.get_lang_id("en"), 128022)
        self.assertEqual(self.tokenizer.get_lang_id("ro"), 128076)
        self.assertEqual(self.tokenizer.get_lang_id("mr"), 128063)

    def test_get_vocab(self):
        vocab = self.tokenizer.get_vocab()
        self.assertEqual(len(vocab), self.tokenizer.vocab_size)
        self.assertEqual(vocab["<unk>"], 3)
        self.assertIn(self.tokenizer.get_lang_token("en"), vocab)

    def test_tokenizer_batch_encode_plus(self):
        self.tokenizer.src_lang = "en"
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)

    def test_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(FR_CODE, self.tokenizer.all_special_ids)
        # fmt: off
        generated_ids = [FR_CODE, 5364, 82, 8642, 4, 294, 47, 8, 14028, 136, 3286, 9706, 6, 90797, 6, 144012, 162, 88128, 30061, 5, 2]
        # fmt: on
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_french = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_french)
        self.assertNotIn(self.tokenizer.eos_token, result)

    def test_tokenizer_save_pretrained(self):
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.lang_token_to_id
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = M2M100Tokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.lang_token_to_id, original_special_tokens)

    @require_torch
    def test_batch_fairseq_parity(self):
        self.tokenizer.src_lang = "en"
        self.tokenizer.tgt_lang = "fr"

        batch = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=True, return_tensors="pt")
        batch["decoder_input_ids"] = shift_tokens_right(
            batch["labels"], self.tokenizer.pad_token_id, self.tokenizer.eos_token_id
        )

        for k in batch:
            batch[k] = batch[k].tolist()
        # batch = {k: v.tolist() for k,v in batch.items()}
        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        # batch.decoder_inputs_ids[0][0] ==
        assert batch.input_ids[1][0] == EN_CODE
        assert batch.input_ids[1][-1] == 2
        assert batch.labels[1][0] == FR_CODE
        assert batch.labels[1][-1] == 2
        assert batch.decoder_input_ids[1][:2] == [2, FR_CODE]

    @require_torch
    def test_src_lang_setter(self):
        self.tokenizer.src_lang = "mr"
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("mr")])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])

        self.tokenizer.src_lang = "zh"
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("zh")])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])

    @require_torch
    def test_tokenizer_target_mode(self):
        self.tokenizer.tgt_lang = "mr"
        self.tokenizer._switch_to_target_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("mr")])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])
        self.tokenizer._switch_to_input_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id(self.tokenizer.src_lang)])

        self.tokenizer.tgt_lang = "zh"
        self.tokenizer._switch_to_target_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("zh")])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])
        self.tokenizer._switch_to_input_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id(self.tokenizer.src_lang)])

    @require_torch
    def test_tokenizer_translation(self):
        inputs = self.tokenizer._build_translation_inputs("A test", return_tensors="pt", src_lang="en", tgt_lang="ar")
        self.assertEqual(
            nested_simplify(inputs),
            {
                # en_XX, A, test, EOS
                "input_ids": [[128022, 58, 4183, 2]],
                "attention_mask": [[1, 1, 1, 1]],
                # ar_AR
                "forced_bos_token_id": 128006,
            },
        )
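# Convention pinned down by the tests above: M2M100 always places the
# language-code token as a *prefix* and </s> as a *suffix*, for source and
# target text alike; shift_tokens_right then moves </s> to the front of the
# decoder inputs, giving the [2, FR_CODE] prefix asserted in the parity test.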
"""simple docstring"""
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.ta.modeling_ta import TaBlock, TaConfig, TaLayerNorm
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class __a ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
@register_to_config
def __init__( self : Union[str, Any] , UpperCAmelCase_ : int , UpperCAmelCase_ : int , UpperCAmelCase_ : int , UpperCAmelCase_ : float , UpperCAmelCase_ : int , UpperCAmelCase_ : int , UpperCAmelCase_ : int , UpperCAmelCase_ : int , UpperCAmelCase_ : str , UpperCAmelCase_ : bool = False , )-> str:
"""simple docstring"""
super().__init__()
UpperCamelCase = nn.Embedding(UpperCAmelCase_ , UpperCAmelCase_ )
UpperCamelCase = nn.Embedding(UpperCAmelCase_ , UpperCAmelCase_ )
UpperCamelCase = False
UpperCamelCase = nn.Dropout(p=UpperCAmelCase_ )
UpperCamelCase = TaConfig(
vocab_size=UpperCAmelCase_ , d_model=UpperCAmelCase_ , num_heads=UpperCAmelCase_ , d_kv=UpperCAmelCase_ , d_ff=UpperCAmelCase_ , dropout_rate=UpperCAmelCase_ , feed_forward_proj=UpperCAmelCase_ , is_decoder=UpperCAmelCase_ , is_encoder_decoder=UpperCAmelCase_ , )
UpperCamelCase = nn.ModuleList()
for lyr_num in range(UpperCAmelCase_ ):
UpperCamelCase = TaBlock(UpperCAmelCase_ )
self.encoders.append(UpperCAmelCase_ )
UpperCamelCase = TaLayerNorm(UpperCAmelCase_ )
UpperCamelCase = nn.Dropout(p=UpperCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : int , UpperCAmelCase_ : int , UpperCAmelCase_ : str )-> List[Any]:
"""simple docstring"""
UpperCamelCase = self.token_embedder(UpperCAmelCase_ )
UpperCamelCase = encoder_input_tokens.shape[1]
UpperCamelCase = torch.arange(UpperCAmelCase_ , device=encoder_input_tokens.device )
x += self.position_encoding(UpperCAmelCase_ )
UpperCamelCase = self.dropout_pre(UpperCAmelCase_ )
# inverted the attention mask
UpperCamelCase = encoder_input_tokens.size()
UpperCamelCase = self.get_extended_attention_mask(UpperCAmelCase_ , UpperCAmelCase_ )
for lyr in self.encoders:
UpperCamelCase = lyr(UpperCAmelCase_ , UpperCAmelCase_ )[0]
UpperCamelCase = self.layer_norm(UpperCAmelCase_ )
return self.dropout_post(UpperCAmelCase_ ), encoder_inputs_mask
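# Note: unlike stock T5 (which relies on relative position bias alone), this
# encoder adds a learned absolute position embedding table -- frozen via
# requires_grad = False above -- to the token embeddings before the T5 blocks.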
import unittest
from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/spiece.model")


@require_sentencepiece
@require_tokenizers
class DebertaV2TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = DebertaVaTokenizer
    rust_tokenizer_class = DebertaVaTokenizerFast
    test_sentencepiece = True
    test_sentencepiece_ignore_case = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = DebertaVaTokenizer(SAMPLE_VOCAB, unk_token="<unk>")
        tokenizer.save_pretrained(self.tmpdirname)

    def get_input_output_texts(self, tokenizer):
        input_text = "this is a test"
        output_text = "this is a test"
        return input_text, output_text
def A__ ( self):
_UpperCamelCase : List[str] = '<pad>'
_UpperCamelCase : Union[str, Any] = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__snake_case) , __snake_case)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__snake_case) , __snake_case)
def A__ ( self):
_UpperCamelCase : Tuple = list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0] , '<pad>')
self.assertEqual(vocab_keys[1] , '<unk>')
self.assertEqual(vocab_keys[-1] , '[PAD]')
self.assertEqual(len(__snake_case) , 3_00_01)
def A__ ( self):
self.assertEqual(self.get_tokenizer().vocab_size , 3_00_00)
def A__ ( self):
# fmt: off
_UpperCamelCase : Union[str, Any] = ' \tHeLLo!how \n Are yoU? '
_UpperCamelCase : Tuple = ['▁hello', '!', 'how', '▁are', '▁you', '?']
# fmt: on
_UpperCamelCase : str = DebertaVaTokenizer(__snake_case , do_lower_case=__snake_case)
_UpperCamelCase : Dict = tokenizer.convert_ids_to_tokens(tokenizer.encode(__snake_case , add_special_tokens=__snake_case))
self.assertListEqual(__snake_case , __snake_case)
_UpperCamelCase : int = DebertaVaTokenizerFast(__snake_case , do_lower_case=__snake_case)
_UpperCamelCase : str = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__snake_case , add_special_tokens=__snake_case))
self.assertListEqual(__snake_case , __snake_case)
@unittest.skip('There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.')
def A__ ( self):
pass
@unittest.skip('There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.')
def A__ ( self):
pass
def A__ ( self):
# fmt: off
_UpperCamelCase : List[str] = 'I was born in 92000, and this is falsé.'
_UpperCamelCase : str = ['▁', '<unk>', '▁was', '▁born', '▁in', '▁9', '2000', '▁', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '▁', '.', ]
# fmt: on
_UpperCamelCase : int = DebertaVaTokenizer(__snake_case , split_by_punct=__snake_case)
_UpperCamelCase : str = tokenizer.convert_ids_to_tokens(tokenizer.encode(__snake_case , add_special_tokens=__snake_case))
self.assertListEqual(__snake_case , __snake_case)
_UpperCamelCase : Optional[int] = DebertaVaTokenizerFast(__snake_case , split_by_punct=__snake_case)
_UpperCamelCase : List[Any] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__snake_case , add_special_tokens=__snake_case))
self.assertListEqual(__snake_case , __snake_case)
def A__ ( self):
# fmt: off
_UpperCamelCase : Optional[Any] = 'I was born in 92000, and this is falsé.'
_UpperCamelCase : List[Any] = ['▁i', '▁was', '▁born', '▁in', '▁9', '2000', '▁', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '▁', '.', ]
# fmt: on
_UpperCamelCase : Optional[Any] = DebertaVaTokenizer(__snake_case , do_lower_case=__snake_case , split_by_punct=__snake_case)
_UpperCamelCase : List[Any] = tokenizer.convert_ids_to_tokens(tokenizer.encode(__snake_case , add_special_tokens=__snake_case))
self.assertListEqual(__snake_case , __snake_case)
_UpperCamelCase : Optional[Any] = DebertaVaTokenizerFast(__snake_case , do_lower_case=__snake_case , split_by_punct=__snake_case)
_UpperCamelCase : str = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__snake_case , add_special_tokens=__snake_case))
self.assertListEqual(__snake_case , __snake_case)
def A__ ( self):
# fmt: off
_UpperCamelCase : Tuple = 'I was born in 92000, and this is falsé.'
_UpperCamelCase : List[Any] = ['▁i', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '.', ]
# fmt: on
_UpperCamelCase : Any = DebertaVaTokenizer(__snake_case , do_lower_case=__snake_case , split_by_punct=__snake_case)
_UpperCamelCase : Tuple = tokenizer.convert_ids_to_tokens(tokenizer.encode(__snake_case , add_special_tokens=__snake_case))
self.assertListEqual(__snake_case , __snake_case)
_UpperCamelCase : Optional[Any] = DebertaVaTokenizerFast(__snake_case , do_lower_case=__snake_case , split_by_punct=__snake_case)
_UpperCamelCase : str = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__snake_case , add_special_tokens=__snake_case))
self.assertListEqual(__snake_case , __snake_case)
def A__ ( self):
# fmt: off
_UpperCamelCase : Tuple = 'I was born in 92000, and this is falsé.'
_UpperCamelCase : int = ['▁', '<unk>', '▁was', '▁born', '▁in', '▁9', '2000', '▁', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '▁', '.', ]
# fmt: on
_UpperCamelCase : List[str] = DebertaVaTokenizer(__snake_case , do_lower_case=__snake_case , split_by_punct=__snake_case)
_UpperCamelCase : Optional[Any] = tokenizer.convert_ids_to_tokens(tokenizer.encode(__snake_case , add_special_tokens=__snake_case))
self.assertListEqual(__snake_case , __snake_case)
_UpperCamelCase : List[str] = DebertaVaTokenizerFast(__snake_case , do_lower_case=__snake_case , split_by_punct=__snake_case)
_UpperCamelCase : Dict = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__snake_case , add_special_tokens=__snake_case))
self.assertListEqual(__snake_case , __snake_case)
def A__ ( self):
# fmt: off
_UpperCamelCase : Optional[Any] = ' \tHeLLo!how \n Are yoU? '
_UpperCamelCase : Dict = ['▁', '<unk>', 'e', '<unk>', 'o', '!', 'how', '▁', '<unk>', 're', '▁yo', '<unk>', '?']
# fmt: on
_UpperCamelCase : List[Any] = DebertaVaTokenizer(__snake_case , do_lower_case=__snake_case , split_by_punct=__snake_case)
_UpperCamelCase : List[str] = tokenizer.convert_ids_to_tokens(tokenizer.encode(__snake_case , add_special_tokens=__snake_case))
self.assertListEqual(__snake_case , __snake_case)
_UpperCamelCase : Union[str, Any] = DebertaVaTokenizerFast(__snake_case , do_lower_case=__snake_case , split_by_punct=__snake_case)
_UpperCamelCase : Tuple = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__snake_case , add_special_tokens=__snake_case))
self.assertListEqual(__snake_case , __snake_case)
def A__ ( self):
_UpperCamelCase : Any = self.get_tokenizer()
_UpperCamelCase : Union[str, Any] = self.get_rust_tokenizer()
_UpperCamelCase : int = 'I was born in 92000, and this is falsé.'
_UpperCamelCase : Dict = tokenizer.convert_ids_to_tokens(tokenizer.encode(__snake_case , add_special_tokens=__snake_case))
_UpperCamelCase : Tuple = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__snake_case , add_special_tokens=__snake_case))
self.assertListEqual(__snake_case , __snake_case)
_UpperCamelCase : List[str] = tokenizer.encode(__snake_case , add_special_tokens=__snake_case)
_UpperCamelCase : Optional[int] = rust_tokenizer.encode(__snake_case , add_special_tokens=__snake_case)
self.assertListEqual(__snake_case , __snake_case)
_UpperCamelCase : int = self.get_rust_tokenizer()
_UpperCamelCase : List[str] = tokenizer.encode(__snake_case)
_UpperCamelCase : List[str] = rust_tokenizer.encode(__snake_case)
self.assertListEqual(__snake_case , __snake_case)
def A__ ( self):
_UpperCamelCase : Dict = 'This is a test'
_UpperCamelCase : Tuple = [13, 1, 43_98, 25, 21, 12_89]
_UpperCamelCase : Any = ['▁', 'T', 'his', '▁is', '▁a', '▁test']
_UpperCamelCase : Any = ['▁', '<unk>', 'his', '▁is', '▁a', '▁test']
_UpperCamelCase : Optional[Any] = DebertaVaTokenizer(__snake_case , keep_accents=__snake_case)
_UpperCamelCase : Union[str, Any] = DebertaVaTokenizerFast(__snake_case , keep_accents=__snake_case)
_UpperCamelCase : Union[str, Any] = tokenizer.encode(__snake_case , add_special_tokens=__snake_case)
self.assertListEqual(__snake_case , __snake_case)
_UpperCamelCase : List[Any] = tokenizer.tokenize(__snake_case)
self.assertListEqual(__snake_case , __snake_case)
_UpperCamelCase : int = tokenizer.convert_ids_to_tokens(__snake_case)
self.assertListEqual(__snake_case , __snake_case)
_UpperCamelCase : Dict = rust_tokenizer.encode(__snake_case , add_special_tokens=__snake_case)
self.assertListEqual(__snake_case , __snake_case)
_UpperCamelCase : Dict = rust_tokenizer.tokenize(__snake_case)
self.assertListEqual(__snake_case , __snake_case)
_UpperCamelCase : Union[str, Any] = rust_tokenizer.convert_ids_to_tokens(__snake_case)
self.assertListEqual(__snake_case , __snake_case)
# fmt: off
_UpperCamelCase : Optional[Any] = 'I was born in 92000, and this is falsé.'
_UpperCamelCase : List[str] = [13, 1, 23, 3_86, 19, 5_61, 30_50, 15, 17, 48, 25, 82_56, 18, 1, 9]
_UpperCamelCase : List[str] = ['▁', 'I', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', 'é', '.', ]
_UpperCamelCase : Any = ['▁', '<unk>', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '.', ]
# fmt: on
_UpperCamelCase : Any = tokenizer.encode(__snake_case , add_special_tokens=__snake_case)
self.assertListEqual(__snake_case , __snake_case)
_UpperCamelCase : str = tokenizer.tokenize(__snake_case)
self.assertListEqual(__snake_case , __snake_case)
_UpperCamelCase : Tuple = tokenizer.convert_ids_to_tokens(__snake_case)
self.assertListEqual(__snake_case , __snake_case)
_UpperCamelCase : Any = rust_tokenizer.encode(__snake_case , add_special_tokens=__snake_case)
self.assertListEqual(__snake_case , __snake_case)
_UpperCamelCase : int = rust_tokenizer.tokenize(__snake_case)
self.assertListEqual(__snake_case , __snake_case)
_UpperCamelCase : int = rust_tokenizer.convert_ids_to_tokens(__snake_case)
self.assertListEqual(__snake_case , __snake_case)
def A__ ( self):
_UpperCamelCase : Union[str, Any] = DebertaVaTokenizer(__snake_case)
_UpperCamelCase : Tuple = tokenizer.encode('sequence builders')
_UpperCamelCase : List[Any] = tokenizer.encode('multi-sequence build')
_UpperCamelCase : str = tokenizer.build_inputs_with_special_tokens(__snake_case)
_UpperCamelCase : List[str] = tokenizer.build_inputs_with_special_tokens(__snake_case , __snake_case)
self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] , __snake_case)
self.assertEqual(
[tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [tokenizer.sep_token_id] , __snake_case , )
@slow
def A__ ( self):
# fmt: off
_UpperCamelCase : Optional[Any] = {'input_ids': [[1, 3_98_67, 36, 1_93_90, 4_86, 27, 3_50_52, 8_14_36, 18, 6_06_85, 12_25, 7, 3_50_52, 8_14_36, 18, 93_67, 1_68_99, 18, 1_59_37, 53, 5_94, 7_73, 18, 1_62_87, 3_04_65, 36, 1_59_37, 6, 4_11_39, 38, 3_69_79, 6_07_63, 1_91, 6, 3_41_32, 99, 6, 5_05_38, 3_90, 4_32_30, 6, 3_41_32, 27_79, 2_08_50, 14, 6_99, 10_72, 11_94, 36, 3_82, 1_09_01, 53, 7, 6_99, 10_72, 20_84, 36, 2_04_22, 6_30, 53, 19, 1_05, 30_49, 18_96, 10_53, 1_68_99, 15_06, 11, 3_79_78, 42_43, 7, 12_37, 3_18_69, 2_00, 1_65_66, 6_54, 6, 3_50_52, 8_14_36, 7, 5_56_30, 1_35_93, 4, 2], [1, 26, 1_50_11, 13, 6_67, 8, 10_53, 18, 2_36_11, 12_37, 7_23_56, 1_28_20, 34, 10_41_34, 12_09, 35, 1_33_13, 66_27, 21, 2_02, 3_47, 7, 1_64, 23_99, 11, 46, 44_85, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 12_32, 28_64, 1_57_85, 1_49_51, 1_05, 5, 85_81, 12_50, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__snake_case , model_name='microsoft/deberta-v2-xlarge' , revision='ad6e42c1532ddf3a15c39246b63f5559d558b670' , )
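# Taken together, the do_lower_case / split_by_punct combinations above check
# that the slow (sentencepiece-backed) and fast (tokenizers-backed)
# implementations agree on pre-tokenization in every configuration.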
import unittest

import torch

from diffusers import DDIMScheduler, DDPMScheduler, UNet2DModel
from diffusers.training_utils import set_seed
from diffusers.utils.testing_utils import slow


torch.backends.cuda.matmul.allow_tf32 = False


class TrainingTests(unittest.TestCase):
    def get_model_optimizer(self, resolution=32):
        set_seed(0)
        model = UNet2DModel(sample_size=resolution, in_channels=3, out_channels=3)
        optimizer = torch.optim.SGD(model.parameters(), lr=0.0001)
        return model, optimizer

    @slow
    def test_training_step_equality(self):
        device = "cpu"  # ensure full determinism without setting the CUBLAS_WORKSPACE_CONFIG env variable
        ddpm_scheduler = DDPMScheduler(
            num_train_timesteps=1000,
            beta_start=0.0001,
            beta_end=0.02,
            beta_schedule="linear",
            clip_sample=True,
        )
        ddim_scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_start=0.0001,
            beta_end=0.02,
            beta_schedule="linear",
            clip_sample=True,
        )

        assert ddpm_scheduler.config.num_train_timesteps == ddim_scheduler.config.num_train_timesteps

        # shared batches for DDPM and DDIM
        set_seed(0)
        clean_images = [torch.randn((4, 3, 32, 32)).clip(-1, 1).to(device) for _ in range(4)]
        noise = [torch.randn((4, 3, 32, 32)).to(device) for _ in range(4)]
        timesteps = [torch.randint(0, 1000, (4,)).long().to(device) for _ in range(4)]

        # train with a DDPM scheduler
        model, optimizer = self.get_model_optimizer(resolution=32)
        model.train().to(device)
        for i in range(4):
            optimizer.zero_grad()
            ddpm_noisy_images = ddpm_scheduler.add_noise(clean_images[i], noise[i], timesteps[i])
            ddpm_noise_pred = model(ddpm_noisy_images, timesteps[i]).sample
            loss = torch.nn.functional.mse_loss(ddpm_noise_pred, noise[i])
            loss.backward()
            optimizer.step()
        del model, optimizer

        # recreate the model and optimizer, and retry with DDIM
        model, optimizer = self.get_model_optimizer(resolution=32)
        model.train().to(device)
        for i in range(4):
            optimizer.zero_grad()
            ddim_noisy_images = ddim_scheduler.add_noise(clean_images[i], noise[i], timesteps[i])
            ddim_noise_pred = model(ddim_noisy_images, timesteps[i]).sample
            loss = torch.nn.functional.mse_loss(ddim_noise_pred, noise[i])
            loss.backward()
            optimizer.step()
        del model, optimizer

        self.assertTrue(torch.allclose(ddpm_noisy_images, ddim_noisy_images, atol=1e-5))
        self.assertTrue(torch.allclose(ddpm_noise_pred, ddim_noise_pred, atol=1e-5))
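# What this test pins down: DDPM and DDIM share the same forward (noising)
# process, so with identical seeds, batches and schedules, training steps
# against either scheduler's add_noise() must produce identical noisy images
# and identical model predictions -- hence the exact allclose checks.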
"""simple docstring"""
import os
def _A ( ):
"""simple docstring"""
lowerCamelCase__ = os.path.join(os.path.dirname(__lowercase ) , """num.txt""" )
with open(__lowercase ) as file_hand:
return str(sum(int(__lowercase ) for line in file_hand ) )[:10]
if __name__ == "__main__":
print(solution())
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class SCREAMING_SNAKE_CASE__ ( metaclass=_SCREAMING_SNAKE_CASE ):
snake_case = ["speech"]
def __init__( self : Optional[int] , *SCREAMING_SNAKE_CASE_ : Tuple , **SCREAMING_SNAKE_CASE_ : Union[str, Any] ):
requires_backends(self , ["""speech"""] )
class SCREAMING_SNAKE_CASE__ ( metaclass=_SCREAMING_SNAKE_CASE ):
snake_case = ["speech"]
def __init__( self : int , *SCREAMING_SNAKE_CASE_ : Optional[int] , **SCREAMING_SNAKE_CASE_ : Dict ):
requires_backends(self , ["""speech"""] )
"""Newton's forward-difference interpolation."""
from __future__ import annotations

import math


def ucal(u: float, p: int) -> float:
    """Falling product u * (u-1) * ... * (u-p+1) used by the Newton terms."""
    temp = u
    for i in range(1, p):
        temp = temp * (u - i)
    return temp


def main() -> None:
    n = int(input("enter the number of values: "))
    # forward difference table, initialised to zeros
    y: list[list[float]] = [[0.0 for _ in range(n)] for _ in range(n)]

    print("enter the values of parameters in a list: ")
    x = list(map(int, input().split()))

    print("enter the values of corresponding parameters: ")
    for i in range(n):
        y[i][0] = float(input())

    value = int(input("enter the value to interpolate: "))
    u = (value - x[0]) / (x[1] - x[0])

    # for calculating forward difference table
    for i in range(1, n):
        for j in range(n - i):
            y[j][i] = y[j + 1][i - 1] - y[j][i - 1]

    summ = y[0][0]
    for i in range(1, n):
        summ += (ucal(u, i) * y[0][i]) / math.factorial(i)

    print(f"the value at {value} is {summ}")


if __name__ == "__main__":
    main()
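# The summation above is Newton's forward-difference interpolation:
#   f(x) ~= y0 + u*Dy0 + u(u-1)/2! * D^2 y0 + u(u-1)(u-2)/3! * D^3 y0 + ...
# with u = (x - x0) / h; ucal(u, i) supplies the falling product
# u(u-1)...(u-i+1), and y[0][i] holds the i-th forward difference D^i y0.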
import collections
import os
from typing import List, Optional, Tuple

from transformers.utils import is_jieba_available, requires_backends


if is_jieba_available():
    import jieba

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "openbmb/cpm-ant-10b": "https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "openbmb/cpm-ant-10b": 1024,
}


def load_vocab(vocab_file):
    """Loads a vocabulary file into an ordered token -> index dictionary."""
    vocab = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as reader:
        tokens = reader.readlines()
    for index, token in enumerate(tokens):
        token = token.rstrip("\n")
        vocab[token] = index
    return vocab


class WordpieceTokenizer(object):
    def __init__(self, vocab, unk_token="<unk>", max_input_chars_per_word=200):
        self.vocab = vocab
        self.unk_token = unk_token
        self.max_input_chars_per_word = max_input_chars_per_word

    def tokenize(self, token):
        chars = list(token)
        if len(chars) > self.max_input_chars_per_word:
            return [self.unk_token]

        start = 0
        sub_tokens = []
        while start < len(chars):
            end = len(chars)
            cur_substr = None
            # greedy longest-match-first: shrink the window until a vocab hit
            while start < end:
                substr = "".join(chars[start:end])
                if substr in self.vocab:
                    cur_substr = substr
                    break
                end -= 1
            if cur_substr is None:
                sub_tokens.append(self.unk_token)
                start += 1
            else:
                sub_tokens.append(cur_substr)
                start = end

        return sub_tokens
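# Greedy longest-match-first example (hypothetical toy vocab, for illustration):
# with vocab = {"un": 0, "unhappy": 1, "hap": 2, "py": 3}, the word "unhappyy"
# tokenizes to ["unhappy", "<unk>"] -- the inner loop always takes the longest
# prefix found in the vocab before advancing `start`.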
class CpmAntTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    add_prefix_space = False

    def __init__(
        self,
        vocab_file,
        bod_token="<d>",
        eod_token="</d>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        unk_token="<unk>",
        line_token="</n>",
        space_token="</_>",
        padding_side="left",
        **kwargs,
    ):
        requires_backends(self, ["jieba"])
        super().__init__(
            bod_token=bod_token,
            eod_token=eod_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            unk_token=unk_token,
            line_token=line_token,
            space_token=space_token,
            padding_side=padding_side,
            **kwargs,
        )
        self.bod_token = bod_token
        self.eod_token = eod_token
        self.encoder = load_vocab(vocab_file)
        self.encoder[" "] = self.encoder[space_token]
        self.encoder["\n"] = self.encoder[line_token]

        del self.encoder[space_token]
        del self.encoder[line_token]

        self.encoder = collections.OrderedDict(sorted(self.encoder.items(), key=lambda x: x[1]))
        self.decoder = {v: k for k, v in self.encoder.items()}

        self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.encoder, unk_token=self.unk_token)

    @property
    def bod_token_id(self):
        return self.encoder[self.bod_token]

    @property
    def eod_token_id(self):
        return self.encoder[self.eod_token]

    @property
    def newline_id(self):
        return self.encoder["\n"]

    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def _tokenize(self, text):
        """Tokenize a string: jieba word segmentation followed by wordpiece."""
        output_tokens = []
        for x in jieba.cut(text, cut_all=False):
            output_tokens.extend(self.wordpiece_tokenizer.tokenize(x))
        return output_tokens

    def _decode(self, token_ids, **kwargs):
        """Decode ids into a string, dropping padding and bos/eos markers."""
        token_ids = [i for i in token_ids if i >= 0]
        token_ids = [
            x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
        ]
        return super()._decode(token_ids, **kwargs)

    def check(self, token):
        return token in self.encoder

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        return "".join(tokens)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.decoder.get(index, self.unk_token)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
            )
        else:
            vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
        index = 0
        if " " in self.encoder:
            self.encoder["</_>"] = self.encoder[" "]
            del self.encoder[" "]
        if "\n" in self.encoder:
            self.encoder["</n>"] = self.encoder["\n"]
            del self.encoder["\n"]
        self.encoder = collections.OrderedDict(sorted(self.encoder.items(), key=lambda x: x[1]))
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token, token_index in self.encoder.items():
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!"
                    )
                    index = token_index
                writer.write(token + "\n")
                index += 1
        return (vocab_file,)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: List[int] = None) -> List[int]:
        if token_ids_1 is None:
            return [self.bos_token_id] + token_ids_0
        return [self.bos_token_id] + token_ids_0 + [self.bos_token_id] + token_ids_1

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1))
        return [1] + ([0] * len(token_ids_0))
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

SWITCH_TRANSFORMERS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/switch-base-8": "https://huggingface.co/google/switch-base-8/blob/main/config.json",
}


class SwitchTransformersConfig(PretrainedConfig):
    model_type = "switch_transformers"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}

    def __init__(
        self,
        vocab_size=32128,
        d_model=768,
        d_kv=64,
        d_ff=2048,
        expert_capacity=64,
        num_layers=12,
        num_sparse_encoder_layers=3,
        num_decoder_layers=12,
        num_sparse_decoder_layers=3,
        num_heads=12,
        num_experts=8,
        router_bias=False,
        router_jitter_noise=0.01,
        router_dtype="float32",
        router_ignore_padding_tokens=False,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        router_z_loss_coef=0.001,
        router_aux_loss_coef=0.001,
        initializer_factor=1.0,
        feed_forward_proj="relu",
        use_cache=True,
        add_router_probs=False,
        is_encoder_decoder=True,
        pad_token_id=0,
        eos_token_id=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff

        self.num_sparse_encoder_layers = num_sparse_encoder_layers

        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_sparse_decoder_layers = num_sparse_decoder_layers

        # This tells us, each how many encoder layers we'll have to set a sparse layer.
        if self.num_sparse_encoder_layers > 0:
            self.encoder_sparse_step = self.num_layers // self.num_sparse_encoder_layers
        else:
            self.encoder_sparse_step = self.num_layers  # HACK: this will create 0 sparse layers

        # This tells us, each how many decoder layers we'll have to set a sparse layer.
        if self.num_sparse_decoder_layers > 0:
            self.decoder_sparse_step = self.num_decoder_layers // self.num_sparse_decoder_layers
        else:
            self.decoder_sparse_step = self.num_decoder_layers  # HACK: this will create 0 sparse layers

        self.num_heads = num_heads
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        self.router_jitter_noise = router_jitter_noise
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(f"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}")
        self.router_dtype = router_dtype

        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance

        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache
        self.add_router_probs = add_router_probs

        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef

        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"

        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer. "
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'"
            )

        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"

        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            **kwargs,
        )
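# Example of the sparse-step bookkeeping above: with the defaults
# num_layers=12 and num_sparse_encoder_layers=3, encoder_sparse_step is
# 12 // 3 = 4, i.e. every 4th encoder layer is a mixture-of-experts layer.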
"""Jaro-Winkler similarity between two strings."""


def jaro_winkler(str1: str, str2: str) -> float:
    """Jaro similarity plus a bonus for a shared prefix of up to four characters."""

    def get_matched_characters(_str1: str, _str2: str) -> str:
        matched = []
        limit = min(len(_str1), len(_str2)) // 2
        for i, char in enumerate(_str1):
            left = int(max(0, i - limit))
            right = int(min(i + limit + 1, len(_str2)))
            if char in _str2[left:right]:
                matched.append(char)
                # blank out the matched character so it cannot match twice
                _str2 = f"{_str2[0:_str2.index(char)]} {_str2[_str2.index(char) + 1:]}"
        return "".join(matched)

    # matching characters
    matching_1 = get_matched_characters(str1, str2)
    matching_2 = get_matched_characters(str2, str1)
    match_count = len(matching_1)

    # transposition
    transpositions = (
        len([(c1, c2) for c1, c2 in zip(matching_1, matching_2) if c1 != c2]) // 2
    )

    if not match_count:
        jaro = 0.0
    else:
        jaro = (
            1
            / 3
            * (
                match_count / len(str1)
                + match_count / len(str2)
                + (match_count - transpositions) / match_count
            )
        )

    # common prefix up to 4 characters
    prefix_len = 0
    for c1, c2 in zip(str1[:4], str2[:4]):
        if c1 == c2:
            prefix_len += 1
        else:
            break

    return jaro + 0.1 * prefix_len * (1 - jaro)
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(jaro_winkler("hello", "world"))
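    # Sanity check (hand-computed): "martha"/"marhta" has 6 matched characters,
    # 1 transposition and a shared 3-character prefix, so
    # jaro = (6/6 + 6/6 + 5/6) / 3 ~= 0.9444 and the final score is ~0.9611.
    assert abs(jaro_winkler("martha", "marhta") - 0.9611) < 1e-3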
| 582
| 1
|
from collections import defaultdict
from pathlib import Path
import pandas as pd
from rouge_cli import calculate_rouge_path
from utils import calculate_rouge
PRED = [
"Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of the"
" final seconds on board Flight 9525. The Germanwings co-pilot says he had a \"previous episode of severe"
" depression\" German airline confirms it knew of Andreas Lubitz's depression years before he took control.",
"The Palestinian Authority officially becomes the 123rd member of the International Criminal Court. The formal"
" accession was marked with a ceremony at The Hague, in the Netherlands. The Palestinians signed the ICC's"
" founding Rome Statute in January. Israel and the United States opposed the Palestinians' efforts to join the"
" body.",
"Amnesty International releases its annual report on the death penalty. The report catalogs the use of"
" state-sanctioned killing as a punitive measure across the globe. At least 607 people were executed around the"
" world in 2014, compared to 778 in 2013. The U.S. remains one of the worst offenders for imposing capital"
" punishment.",
]
TGT = [
"Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports ."
" Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz"
" had informed his Lufthansa training school of an episode of severe depression, airline says .",
"Membership gives the ICC jurisdiction over alleged crimes committed in Palestinian territories since last June ."
" Israel and the United States opposed the move, which could open the door to war crimes investigations against"
" Israelis .",
"Amnesty's annual death penalty report catalogs encouraging signs, but setbacks in numbers of those sentenced to"
" death . Organization claims that governments around the world are using the threat of terrorism to advance"
" executions . The number of executions worldwide has gone down by almost 22% compared with 2013, but death"
" sentences up by 28% .",
]
def test_disaggregated_scores_are_deterministic():
    no_aggregation = calculate_rouge(PRED, TGT, bootstrap_aggregation=False, rouge_keys=["rouge2", "rougeL"])
    assert isinstance(no_aggregation, defaultdict)
    no_aggregation_just_r2 = calculate_rouge(PRED, TGT, bootstrap_aggregation=False, rouge_keys=["rouge2"])
    assert (
        pd.DataFrame(no_aggregation["rouge2"]).fmeasure.mean()
        == pd.DataFrame(no_aggregation_just_r2["rouge2"]).fmeasure.mean()
    )
def test_newline_cnn_improvement():
    k = "rougeLsum"
    score = calculate_rouge(PRED, TGT, newline_sep=True, rouge_keys=[k])[k]
    score_no_sep = calculate_rouge(PRED, TGT, newline_sep=False, rouge_keys=[k])[k]
    assert score > score_no_sep
def test_newline_irrelevant_for_other_metrics():
    k = ["rouge1", "rouge2", "rougeL"]
    score_sep = calculate_rouge(PRED, TGT, newline_sep=True, rouge_keys=k)
    score_no_sep = calculate_rouge(PRED, TGT, newline_sep=False, rouge_keys=k)
    assert score_sep == score_no_sep
def test_single_sent_scores_dont_depend_on_newline_sep():
    pred = [
        "Her older sister, Margot Frank, died in 1945, a month earlier than previously thought.",
        'Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports .',
    ]
    tgt = [
        "Margot Frank, died in 1945, a month earlier than previously thought.",
        'Prosecutor: "No videos were used in the crash investigation" German papers say they saw a cell phone video of'
        " the final seconds on board Flight 9525.",
    ]
    assert calculate_rouge(pred, tgt, newline_sep=True) == calculate_rouge(pred, tgt, newline_sep=False)
def test_pegasus_newline():
    pred = [
        """\" \"a person who has such a video needs to immediately give it to the investigators,\" prosecutor says .<n> \"it is a very disturbing scene,\" editor-in-chief of bild online tells \"erin burnett: outfront\" """
    ]
    tgt = [
        """ Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports . Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz had informed his Lufthansa training school of an episode of severe depression, airline says ."""
    ]
    prev_score = calculate_rouge(pred, tgt, rouge_keys=["rougeLsum"], newline_sep=False)["rougeLsum"]
    new_score = calculate_rouge(pred, tgt, rouge_keys=["rougeLsum"])["rougeLsum"]
    assert new_score > prev_score
def test_rouge_cli():
    data_dir = Path("examples/seq2seq/test_data/wmt_en_ro")
    metrics = calculate_rouge_path(data_dir.joinpath("test.source"), data_dir.joinpath("test.target"))
    assert isinstance(metrics, dict)
    metrics_default_dict = calculate_rouge_path(
        data_dir.joinpath("test.source"), data_dir.joinpath("test.target"), bootstrap_aggregation=False
    )
    assert isinstance(metrics_default_dict, defaultdict)
| 390
|
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
class UperNetConfig(PretrainedConfig):
    model_type = "upernet"

    def __init__(
        self,
        backbone_config=None,
        hidden_size=512,
        initializer_range=0.02,
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_in_channels=384,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
            backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage1", "stage2", "stage3", "stage4"])
        elif isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.get("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)

        self.backbone_config = backbone_config
        self.hidden_size = hidden_size
        self.initializer_range = initializer_range
        self.pool_scales = pool_scales
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_in_channels = auxiliary_in_channels
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.loss_ignore_index = loss_ignore_index

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
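# Minimal usage sketch (not executed here): with no arguments the config falls
# back to a ResNet backbone truncated to its four stages, and a plain dict
# re-instantiates the matching backbone config class.
# config = UperNetConfig()
# assert config.backbone_config.model_type == "resnet"
# assert config.to_dict()["model_type"] == "upernet"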
| 390
| 1
|
test_graph = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
def bfs(graph, s, t, parent):
    """Breadth-first search: return True if there is a residual path from s to t."""
    visited = [False] * len(graph)
    queue = [s]
    visited[s] = True
    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u
    return visited[t]


def mincut(graph, source, sink):
    """Ford-Fulkerson: return the edges of a minimum cut as (u, v) pairs."""
    parent = [-1] * len(graph)
    max_flow = 0
    res = []
    temp = [i[:] for i in graph]  # Record original capacities (copy).
    while bfs(graph, source, sink, parent):
        path_flow = float("Inf")
        s = sink
        while s != source:
            # Find the minimum residual capacity along the augmenting path
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]
        max_flow += path_flow
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    # An edge is in the cut if it is saturated now but had capacity originally.
    for i in range(len(graph)):
        for j in range(len(graph[0])):
            if graph[i][j] == 0 and temp[i][j] > 0:
                res.append((i, j))
    return res
if __name__ == "__main__":
    print(mincut(test_graph, source=0, sink=5))
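    # Max-flow/min-cut check (hand-computed for this network): the maximum flow
    # is 23, and the saturated cut edges (1, 3), (4, 3) and (4, 5) have original
    # capacities 12 + 7 + 4 = 23, matching the max-flow min-cut theorem.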
| 242
|
def triangle_number_generator():
    """Yield triangle numbers 1, 3, 6, 10, ..."""
    for n in range(1, 1000000):
        yield n * (n + 1) // 2


def count_divisors(n):
    """Count the divisors of n via its prime factorization."""
    divisors_count = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        divisors_count *= multiplicity + 1
        i += 1
    if n > 1:
        divisors_count *= 2
    return divisors_count


def solution():
    """Return the first triangle number with more than 500 divisors."""
    return next(i for i in triangle_number_generator() if count_divisors(i) > 500)
if __name__ == "__main__":
    print(solution())
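    # Worked example from the problem statement: 28 = 2^2 * 7 is the first
    # triangle number with more than five divisors, (2 + 1) * (1 + 1) = 6 of them.
    assert count_divisors(28) == 6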
| 242
| 1
|
'''simple docstring'''
import numpy as np
import torch
from torch.utils.data import Dataset, IterableDataset
from ..utils.generic import ModelOutput
class PipelineDataset(Dataset):
    def __init__(self, dataset, process, params):
        self.dataset = dataset
        self.process = process
        self.params = params

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        item = self.dataset[i]
        processed = self.process(item, **self.params)
        return processed


class PipelineIterator(IterableDataset):
    def __init__(self, loader, infer, params, loader_batch_size=None):
        self.loader = loader
        self.infer = infer
        self.params = params
        if loader_batch_size == 1:
            # Let's spare some time by deactivating altogether
            loader_batch_size = None
        self.loader_batch_size = loader_batch_size

        # Internal bookkeeping
        self._loader_batch_index = None
        self._loader_batch_data = None

    def __len__(self):
        return len(self.loader)

    def __iter__(self):
        self.iterator = iter(self.loader)
        return self

    def loader_batch_item(self):
        if isinstance(self._loader_batch_data, torch.Tensor):
            # Batch data is a simple tensor, just fetch the slice
            result = self._loader_batch_data[self._loader_batch_index]
        else:
            # Batch data is assumed to be BaseModelOutput (or dict)
            loader_batched = {}
            for k, element in self._loader_batch_data.items():
                if isinstance(element, ModelOutput):
                    # Convert ModelOutput to tuple first
                    element = element.to_tuple()
                    if isinstance(element[0], torch.Tensor):
                        loader_batched[k] = tuple(el[self._loader_batch_index].unsqueeze(0) for el in element)
                    elif isinstance(element[0], np.ndarray):
                        loader_batched[k] = tuple(np.expand_dims(el[self._loader_batch_index], 0) for el in element)
                    continue
                if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(element, tuple):
                    # Those are stored as lists of tensors so need specific unbatching.
                    if isinstance(element[0], torch.Tensor):
                        loader_batched[k] = tuple(el[self._loader_batch_index].unsqueeze(0) for el in element)
                    elif isinstance(element[0], np.ndarray):
                        loader_batched[k] = tuple(np.expand_dims(el[self._loader_batch_index], 0) for el in element)
                    continue
                if element is None:
                    # This can happen for optional data that get passed around
                    loader_batched[k] = None
                elif isinstance(element[self._loader_batch_index], torch.Tensor):
                    # Take correct batch data, but make it look like batch_size=1
                    # For compatibility with other methods within transformers
                    loader_batched[k] = element[self._loader_batch_index].unsqueeze(0)
                elif isinstance(element[self._loader_batch_index], np.ndarray):
                    # Take correct batch data, but make it look like batch_size=1
                    # For compatibility with other methods within transformers
                    loader_batched[k] = np.expand_dims(element[self._loader_batch_index], 0)
                else:
                    # This is typically a list, so no need to `unsqueeze`.
                    loader_batched[k] = element[self._loader_batch_index]
            # Recreate the element by reusing the original class to make it look
            # like batch_size=1
            result = self._loader_batch_data.__class__(loader_batched)
        self._loader_batch_index += 1
        return result

    def __next__(self):
        if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
            # We are currently unrolling a batch so we just need to return
            # the current item within that batch
            return self.loader_batch_item()

        # We're out of items within a batch
        item = next(self.iterator)
        processed = self.infer(item, **self.params)
        # We now have a batch of "inferred things".
        if self.loader_batch_size is not None:
            # Try to infer the size of the batch
            if isinstance(processed, torch.Tensor):
                first_tensor = processed
            else:
                key = list(processed.keys())[0]
                first_tensor = processed[key]
            if isinstance(first_tensor, list):
                observed_batch_size = len(first_tensor)
            else:
                observed_batch_size = first_tensor.shape[0]
            if 0 < observed_batch_size < self.loader_batch_size:
                # could be last batch so we can't unroll as many elements.
                self.loader_batch_size = observed_batch_size
            # Setting internal index to unwrap the batch
            self._loader_batch_data = processed
            self._loader_batch_index = 0
            return self.loader_batch_item()
        else:
            # We're not unrolling batches
            return processed


class PipelineChunkIterator(PipelineIterator):
    def __init__(self, loader, infer, params, loader_batch_size=None):
        super().__init__(loader, infer, params)

    def __iter__(self):
        self.iterator = iter(self.loader)
        self.subiterator = None
        return self

    def __next__(self):
        if self.subiterator is None:
            self.subiterator = self.infer(next(self.iterator), **self.params)
        try:
            # Try to return next item
            processed = next(self.subiterator)
        except StopIteration:
            # When a preprocess iterator ends, we can start looking at the next item
            # ChunkIterator will keep feeding until ALL elements of iterator
            # all have created their subiterator and have been iterating against.
            #
            # Another way to look at it, is we're basically flattening lists of lists
            # into a single list, but with generators
            self.subiterator = self.infer(next(self.iterator), **self.params)
            processed = next(self.subiterator)
        return processed


class PipelinePackIterator(PipelineIterator):
    def __iter__(self):
        self.iterator = iter(self.loader)
        return self

    def __next__(self):
        # Extremely similar to PipelineIterator in its unpacking mechanism
        # BUT, we have an extra required item which is the presence of `is_last`
        # That is because everything is flattened by `PipelineChunkIterator` we
        # need to keep track of how to regroup here in the original `process`
        # boundaries so that `process` and `postprocess` see the same data.
        # This iterator accumulates items (possibly while unbatching) until it
        # hits `is_last` and then just passes it on to the caller.
        is_last = False
        accumulator = []
        if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
            while self._loader_batch_index < self.loader_batch_size:
                item = self.loader_batch_item()
                is_last = item.pop("is_last")
                accumulator.append(item)
                if is_last:
                    return accumulator

        while not is_last:
            processed = self.infer(next(self.iterator), **self.params)
            if self.loader_batch_size is not None:
                if isinstance(processed, torch.Tensor):
                    first_tensor = processed
                else:
                    key = list(processed.keys())[0]
                    first_tensor = processed[key]
                if isinstance(first_tensor, list):
                    observed_batch_size = len(first_tensor)
                else:
                    observed_batch_size = first_tensor.shape[0]
                if 0 < observed_batch_size < self.loader_batch_size:
                    # could be last batch so we can't unroll as many elements.
                    self.loader_batch_size = observed_batch_size
                self._loader_batch_data = processed
                self._loader_batch_index = 0
                while self._loader_batch_index < self.loader_batch_size:
                    item = self.loader_batch_item()
                    is_last = item.pop("is_last")
                    accumulator.append(item)
                    if is_last:
                        return accumulator
            else:
                item = processed
                is_last = item.pop("is_last")
                accumulator.append(item)
        return accumulator


class KeyDataset(Dataset):
    def __init__(self, dataset, key):
        self.dataset = dataset
        self.key = key

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        return self.dataset[i][self.key]


class KeyPairDataset(Dataset):
    def __init__(self, dataset, key1, key2):
        self.dataset = dataset
        self.key1 = key1
        self.key2 = key2

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        return {"text": self.dataset[i][self.key1], "text_pair": self.dataset[i][self.key2]}
| 708
|
'''simple docstring'''
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
logger = logging.get_logger(__name__)
FLAX_MODEL_MAPPING_NAMES = OrderedDict(
[
# Base model mapping
("albert", "FlaxAlbertModel"),
("bart", "FlaxBartModel"),
("beit", "FlaxBeitModel"),
("bert", "FlaxBertModel"),
("big_bird", "FlaxBigBirdModel"),
("blenderbot", "FlaxBlenderbotModel"),
("blenderbot-small", "FlaxBlenderbotSmallModel"),
("clip", "FlaxCLIPModel"),
("distilbert", "FlaxDistilBertModel"),
("electra", "FlaxElectraModel"),
("gpt-sw3", "FlaxGPT2Model"),
("gpt2", "FlaxGPT2Model"),
("gpt_neo", "FlaxGPTNeoModel"),
("gptj", "FlaxGPTJModel"),
("longt5", "FlaxLongT5Model"),
("marian", "FlaxMarianModel"),
("mbart", "FlaxMBartModel"),
("mt5", "FlaxMT5Model"),
("opt", "FlaxOPTModel"),
("pegasus", "FlaxPegasusModel"),
("regnet", "FlaxRegNetModel"),
("resnet", "FlaxResNetModel"),
("roberta", "FlaxRobertaModel"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormModel"),
("roformer", "FlaxRoFormerModel"),
("t5", "FlaxT5Model"),
("vision-text-dual-encoder", "FlaxVisionTextDualEncoderModel"),
("vit", "FlaxViTModel"),
("wav2vec2", "FlaxWav2Vec2Model"),
("whisper", "FlaxWhisperModel"),
("xglm", "FlaxXGLMModel"),
("xlm-roberta", "FlaxXLMRobertaModel"),
]
)
FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES = OrderedDict(
[
# Model for pre-training mapping
("albert", "FlaxAlbertForPreTraining"),
("bart", "FlaxBartForConditionalGeneration"),
("bert", "FlaxBertForPreTraining"),
("big_bird", "FlaxBigBirdForPreTraining"),
("electra", "FlaxElectraForPreTraining"),
("longt5", "FlaxLongT5ForConditionalGeneration"),
("mbart", "FlaxMBartForConditionalGeneration"),
("mt5", "FlaxMT5ForConditionalGeneration"),
("roberta", "FlaxRobertaForMaskedLM"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMaskedLM"),
("roformer", "FlaxRoFormerForMaskedLM"),
("t5", "FlaxT5ForConditionalGeneration"),
("wav2vec2", "FlaxWav2Vec2ForPreTraining"),
("whisper", "FlaxWhisperForConditionalGeneration"),
("xlm-roberta", "FlaxXLMRobertaForMaskedLM"),
]
)
FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Masked LM mapping
("albert", "FlaxAlbertForMaskedLM"),
("bart", "FlaxBartForConditionalGeneration"),
("bert", "FlaxBertForMaskedLM"),
("big_bird", "FlaxBigBirdForMaskedLM"),
("distilbert", "FlaxDistilBertForMaskedLM"),
("electra", "FlaxElectraForMaskedLM"),
("mbart", "FlaxMBartForConditionalGeneration"),
("roberta", "FlaxRobertaForMaskedLM"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMaskedLM"),
("roformer", "FlaxRoFormerForMaskedLM"),
("xlm-roberta", "FlaxXLMRobertaForMaskedLM"),
]
)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
("bart", "FlaxBartForConditionalGeneration"),
("blenderbot", "FlaxBlenderbotForConditionalGeneration"),
("blenderbot-small", "FlaxBlenderbotSmallForConditionalGeneration"),
("encoder-decoder", "FlaxEncoderDecoderModel"),
("longt5", "FlaxLongT5ForConditionalGeneration"),
("marian", "FlaxMarianMTModel"),
("mbart", "FlaxMBartForConditionalGeneration"),
("mt5", "FlaxMT5ForConditionalGeneration"),
("pegasus", "FlaxPegasusForConditionalGeneration"),
("t5", "FlaxT5ForConditionalGeneration"),
]
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
    [
        # Model for Image Classification mapping
("beit", "FlaxBeitForImageClassification"),
("regnet", "FlaxRegNetForImageClassification"),
("resnet", "FlaxResNetForImageClassification"),
("vit", "FlaxViTForImageClassification"),
]
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES = OrderedDict(
[
("vision-encoder-decoder", "FlaxVisionEncoderDecoderModel"),
]
)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Causal LM mapping
("bart", "FlaxBartForCausalLM"),
("bert", "FlaxBertForCausalLM"),
("big_bird", "FlaxBigBirdForCausalLM"),
("electra", "FlaxElectraForCausalLM"),
("gpt-sw3", "FlaxGPT2LMHeadModel"),
("gpt2", "FlaxGPT2LMHeadModel"),
("gpt_neo", "FlaxGPTNeoForCausalLM"),
("gptj", "FlaxGPTJForCausalLM"),
("opt", "FlaxOPTForCausalLM"),
("roberta", "FlaxRobertaForCausalLM"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForCausalLM"),
("xglm", "FlaxXGLMForCausalLM"),
("xlm-roberta", "FlaxXLMRobertaForCausalLM"),
]
)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Sequence Classification mapping
("albert", "FlaxAlbertForSequenceClassification"),
("bart", "FlaxBartForSequenceClassification"),
("bert", "FlaxBertForSequenceClassification"),
("big_bird", "FlaxBigBirdForSequenceClassification"),
("distilbert", "FlaxDistilBertForSequenceClassification"),
("electra", "FlaxElectraForSequenceClassification"),
("mbart", "FlaxMBartForSequenceClassification"),
("roberta", "FlaxRobertaForSequenceClassification"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForSequenceClassification"),
("roformer", "FlaxRoFormerForSequenceClassification"),
("xlm-roberta", "FlaxXLMRobertaForSequenceClassification"),
]
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict(
[
# Model for Question Answering mapping
("albert", "FlaxAlbertForQuestionAnswering"),
("bart", "FlaxBartForQuestionAnswering"),
("bert", "FlaxBertForQuestionAnswering"),
("big_bird", "FlaxBigBirdForQuestionAnswering"),
("distilbert", "FlaxDistilBertForQuestionAnswering"),
("electra", "FlaxElectraForQuestionAnswering"),
("mbart", "FlaxMBartForQuestionAnswering"),
("roberta", "FlaxRobertaForQuestionAnswering"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForQuestionAnswering"),
("roformer", "FlaxRoFormerForQuestionAnswering"),
("xlm-roberta", "FlaxXLMRobertaForQuestionAnswering"),
]
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Token Classification mapping
("albert", "FlaxAlbertForTokenClassification"),
("bert", "FlaxBertForTokenClassification"),
("big_bird", "FlaxBigBirdForTokenClassification"),
("distilbert", "FlaxDistilBertForTokenClassification"),
("electra", "FlaxElectraForTokenClassification"),
("roberta", "FlaxRobertaForTokenClassification"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForTokenClassification"),
("roformer", "FlaxRoFormerForTokenClassification"),
("xlm-roberta", "FlaxXLMRobertaForTokenClassification"),
]
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES = OrderedDict(
[
# Model for Multiple Choice mapping
("albert", "FlaxAlbertForMultipleChoice"),
("bert", "FlaxBertForMultipleChoice"),
("big_bird", "FlaxBigBirdForMultipleChoice"),
("distilbert", "FlaxDistilBertForMultipleChoice"),
("electra", "FlaxElectraForMultipleChoice"),
("roberta", "FlaxRobertaForMultipleChoice"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMultipleChoice"),
("roformer", "FlaxRoFormerForMultipleChoice"),
("xlm-roberta", "FlaxXLMRobertaForMultipleChoice"),
]
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES = OrderedDict(
[
("bert", "FlaxBertForNextSentencePrediction"),
]
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES = OrderedDict(
[
("speech-encoder-decoder", "FlaxSpeechEncoderDecoderModel"),
("whisper", "FlaxWhisperForConditionalGeneration"),
]
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
("whisper", "FlaxWhisperForAudioClassification"),
]
)
FLAX_MODEL_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
FLAX_MODEL_FOR_PRETRAINING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
FLAX_MODEL_FOR_MASKED_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class FlaxAutoModel(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_MAPPING


FlaxAutoModel = auto_class_update(FlaxAutoModel)


class FlaxAutoModelForPreTraining(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_PRETRAINING_MAPPING


FlaxAutoModelForPreTraining = auto_class_update(FlaxAutoModelForPreTraining, head_doc="pretraining")


class FlaxAutoModelForCausalLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING


FlaxAutoModelForCausalLM = auto_class_update(FlaxAutoModelForCausalLM, head_doc="causal language modeling")


class FlaxAutoModelForMaskedLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MASKED_LM_MAPPING


FlaxAutoModelForMaskedLM = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="masked language modeling")


class FlaxAutoModelForSeq2SeqLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING


FlaxAutoModelForSeq2SeqLM = auto_class_update(
    FlaxAutoModelForSeq2SeqLM, head_doc="sequence-to-sequence language modeling", checkpoint_for_example="t5-base"
)


class FlaxAutoModelForSequenceClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING


FlaxAutoModelForSequenceClassification = auto_class_update(
    FlaxAutoModelForSequenceClassification, head_doc="sequence classification"
)


class FlaxAutoModelForQuestionAnswering(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING


FlaxAutoModelForQuestionAnswering = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="question answering")


class FlaxAutoModelForTokenClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING


FlaxAutoModelForTokenClassification = auto_class_update(
    FlaxAutoModelForTokenClassification, head_doc="token classification"
)


class FlaxAutoModelForMultipleChoice(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING


FlaxAutoModelForMultipleChoice = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="multiple choice")


class FlaxAutoModelForNextSentencePrediction(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING


FlaxAutoModelForNextSentencePrediction = auto_class_update(
    FlaxAutoModelForNextSentencePrediction, head_doc="next sentence prediction"
)


class FlaxAutoModelForImageClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING


FlaxAutoModelForImageClassification = auto_class_update(
    FlaxAutoModelForImageClassification, head_doc="image classification"
)


class FlaxAutoModelForVision2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING


FlaxAutoModelForVision2Seq = auto_class_update(FlaxAutoModelForVision2Seq, head_doc="vision-to-text modeling")


class FlaxAutoModelForSpeechSeq2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING


FlaxAutoModelForSpeechSeq2Seq = auto_class_update(
    FlaxAutoModelForSpeechSeq2Seq, head_doc="sequence-to-sequence speech-to-text modeling"
)
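# Minimal usage sketch (not executed here; downloads weights on first call):
# each auto class dispatches on a checkpoint's config.model_type through the
# mapping above, e.g. "bert" -> FlaxBertModel.
# model = FlaxAutoModel.from_pretrained("bert-base-cased")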
| 123
| 0
|
from transformers import BertTokenizer, EncoderDecoderModel, Seq2SeqTrainer, Seq2SeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available

if is_datasets_available():
    import datasets


class Seq2SeqTrainerTester(TestCasePlus):
    @slow
    @require_torch
    def test_finetune_bert2bert(self):
        bert2bert = EncoderDecoderModel.from_encoder_decoder_pretrained("prajjwal1/bert-tiny", "prajjwal1/bert-tiny")
        tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")

        bert2bert.config.vocab_size = bert2bert.config.encoder.vocab_size
        bert2bert.config.eos_token_id = tokenizer.sep_token_id
        bert2bert.config.decoder_start_token_id = tokenizer.cls_token_id
        bert2bert.config.max_length = 128

        train_dataset = datasets.load_dataset("cnn_dailymail", "3.0.0", split="train[:1%]")
        val_dataset = datasets.load_dataset("cnn_dailymail", "3.0.0", split="validation[:1%]")

        train_dataset = train_dataset.select(range(32))
        val_dataset = val_dataset.select(range(16))

        batch_size = 4

        def _map_to_encoder_decoder_inputs(batch):
            # Tokenizer will automatically set [BOS] <text> [EOS]
            inputs = tokenizer(batch["article"], padding="max_length", truncation=True, max_length=512)
            outputs = tokenizer(batch["highlights"], padding="max_length", truncation=True, max_length=128)
            batch["input_ids"] = inputs.input_ids
            batch["attention_mask"] = inputs.attention_mask

            batch["decoder_input_ids"] = outputs.input_ids
            batch["labels"] = outputs.input_ids.copy()
            batch["labels"] = [
                [-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["labels"]
            ]
            batch["decoder_attention_mask"] = outputs.attention_mask

            assert all(len(x) == 512 for x in inputs.input_ids)
            assert all(len(x) == 128 for x in outputs.input_ids)

            return batch

        def _compute_metrics(pred):
            labels_ids = pred.label_ids
            pred_ids = pred.predictions

            # all unnecessary tokens are removed
            pred_str = tokenizer.batch_decode(pred_ids, skip_special_tokens=True)
            label_str = tokenizer.batch_decode(labels_ids, skip_special_tokens=True)

            accuracy = sum([int(pred_str[i] == label_str[i]) for i in range(len(pred_str))]) / len(pred_str)
            return {"accuracy": accuracy}

        # map train dataset
        train_dataset = train_dataset.map(
            _map_to_encoder_decoder_inputs,
            batched=True,
            batch_size=batch_size,
            remove_columns=["article", "highlights"],
        )
        train_dataset.set_format(
            type="torch",
            columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"],
        )

        # same for validation dataset
        val_dataset = val_dataset.map(
            _map_to_encoder_decoder_inputs,
            batched=True,
            batch_size=batch_size,
            remove_columns=["article", "highlights"],
        )
        val_dataset.set_format(
            type="torch",
            columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"],
        )

        output_dir = self.get_auto_remove_tmp_dir()

        training_args = Seq2SeqTrainingArguments(
            output_dir=output_dir,
            per_device_train_batch_size=batch_size,
            per_device_eval_batch_size=batch_size,
            predict_with_generate=True,
            evaluation_strategy="steps",
            do_train=True,
            do_eval=True,
            warmup_steps=0,
            eval_steps=2,
            logging_steps=2,
        )

        # instantiate trainer
        trainer = Seq2SeqTrainer(
            model=bert2bert,
            args=training_args,
            compute_metrics=_compute_metrics,
            train_dataset=train_dataset,
            eval_dataset=val_dataset,
            tokenizer=tokenizer,
        )

        # start training
        trainer.train()
| 23
|
"""simple docstring"""
from __future__ import annotations
import math
class SegmentTree:
    def __init__(self, size: int) -> None:
        self.size = size
        # approximate the overall size of segment tree with given value
        self.segment_tree = [0 for i in range(0, 4 * size)]
        # create array to store lazy update
        self.lazy = [0 for i in range(0, 4 * size)]
        self.flag = [0 for i in range(0, 4 * size)]  # flag for lazy update

    def left(self, idx: int) -> int:
        return idx * 2

    def right(self, idx: int) -> int:
        return idx * 2 + 1

    def build(self, idx: int, left_element: int, right_element: int, a: list[int]) -> None:
        if left_element == right_element:
            self.segment_tree[idx] = a[left_element - 1]
        else:
            mid = (left_element + right_element) // 2
            self.build(self.left(idx), left_element, mid, a)
            self.build(self.right(idx), mid + 1, right_element, a)
            self.segment_tree[idx] = max(
                self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)]
            )

    def update(self, idx: int, left_element: int, right_element: int, a: int, b: int, val: int) -> bool:
        """Assign `val` to every element of A[a..b] (1-indexed), lazily."""
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True

        if right_element < a or left_element > b:
            return True
        if left_element >= a and right_element <= b:
            self.segment_tree[idx] = val
            if left_element != right_element:
                self.lazy[self.left(idx)] = val
                self.lazy[self.right(idx)] = val
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
            return True
        mid = (left_element + right_element) // 2
        self.update(self.left(idx), left_element, mid, a, b, val)
        self.update(self.right(idx), mid + 1, right_element, a, b, val)
        self.segment_tree[idx] = max(
            self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)]
        )
        return True

    def query(self, idx: int, left_element: int, right_element: int, a: int, b: int) -> int | float:
        """Return max(A[a..b]) (1-indexed), pushing pending lazy updates down."""
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
        if right_element < a or left_element > b:
            return -math.inf
        if left_element >= a and right_element <= b:
            return self.segment_tree[idx]
        mid = (left_element + right_element) // 2
        q1 = self.query(self.left(idx), left_element, mid, a, b)
        q2 = self.query(self.right(idx), mid + 1, right_element, a, b)
        return max(q1, q2)

    def __str__(self) -> str:
        return str([self.query(1, 1, self.size, i, i) for i in range(1, self.size + 1)])
if __name__ == "__main__":
    A = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8]
    size = 15
    segt = SegmentTree(size)
    segt.build(1, 1, size, A)
    print(segt.query(1, 1, size, 4, 6))
    print(segt.query(1, 1, size, 7, 11))
    print(segt.query(1, 1, size, 7, 12))
    segt.update(1, 1, size, 1, 3, 111)
    print(segt.query(1, 1, size, 1, 15))
    segt.update(1, 1, size, 7, 8, 235)
    print(segt)
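    # Expected output for the data above (hand-checked): 7 = max(A[4..6]),
    # 14 = max(A[7..11]), 15 = max(A[7..12]), then 111 once A[1..3] is assigned
    # 111; the final print shows every point query after both range updates.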
| 134
| 0
|
'''simple docstring'''
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
NON_ALPHA = re.compile("[^A-Za-z_0-9]")
# parameters used in DuplicationIndex
MIN_NUM_TOKENS = 10
NUM_PERM = 256
def get_min_hash(tokens: List[str]) -> Optional[MinHash]:
    """Compute the MinHash of a code snippet's tokens (None if too short)."""
    if len(tokens) < MIN_NUM_TOKENS:
        return None
    min_hash = MinHash(num_perm=NUM_PERM)
    for token in set(tokens):
        min_hash.update(token.encode())
    return min_hash


def get_tokens(code: str) -> Set[str]:
    """Tokenize on non-alphanumeric characters, dropping empty tokens."""
    return {t for t in NON_ALPHA.split(code) if len(t.strip()) > 0}


class DuplicationIndex:
    def __init__(self, *, duplication_jaccard_threshold: float = 0.85):
        self._duplication_jaccard_threshold = duplication_jaccard_threshold
        self._num_perm = NUM_PERM
        self._index = MinHashLSH(threshold=self._duplication_jaccard_threshold, num_perm=self._num_perm)
        self._duplicate_clusters = defaultdict(set)

    def add(self, code_key: Tuple, min_hash: MinHash) -> None:
        close_duplicates = self._index.query(min_hash)
        if code_key in self._index.keys:
            print(f"Duplicate key {code_key}")
            return
        self._index.insert(code_key, min_hash)
        if len(close_duplicates) > 0:
            for base_duplicate in close_duplicates:
                if base_duplicate in self._duplicate_clusters:
                    self._duplicate_clusters[base_duplicate].add(code_key)
                    break
            else:
                self._duplicate_clusters[close_duplicates[0]].add(code_key)

    def get_duplicate_clusters(self) -> List[List[Dict]]:
        duplicate_clusters = []
        for base, duplicates in self._duplicate_clusters.items():
            cluster = [base] + list(duplicates)
            # reformat the cluster to be a list of dict
            cluster = [{"base_index": el[0], "repo_name": el[1], "path": el[2]} for el in cluster]
            duplicate_clusters.append(cluster)
        return duplicate_clusters

    def save(self, filepath) -> None:
        duplicate_clusters = self.get_duplicate_clusters()
        with open(filepath, "w") as f:
            json.dump(duplicate_clusters, f)


def _compute_min_hash(element):
    index, data = element
    min_hash = get_min_hash([t for t in NON_ALPHA.split(data["content"]) if len(t.strip()) > 0])
    if min_hash is not None:
        return (index, data["repo_name"], data["path"]), min_hash


def minhash_iter(dataset_iterator):
    with mp.Pool() as pool:
        for data in pool.imap_unordered(
            _compute_min_hash,
            ThreadedIterator(dataset_iterator, max_queue_size=10000),
            chunksize=100,
        ):
            if data is not None:
                yield data


def make_duplicate_clusters(dataset_iterator, jaccard_threshold: float):
    di = DuplicationIndex(duplication_jaccard_threshold=jaccard_threshold)
    for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(dataset_iterator)), max_queue_size=100)):
        di.add(filename, min_hash)
    # Returns a List[Cluster] where Cluster is List[str] with the filenames.
    return di.get_duplicate_clusters()


def jaccard_similarity(code1: str, code2: str) -> float:
    tokens1 = get_tokens(code1)
    tokens2 = get_tokens(code2)
    return len(tokens1 & tokens2) / len(tokens1 | tokens2)
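# Quick check of the token-level Jaccard above: the two snippets share
# {"def", "foo"} out of {"def", "foo", "bar", "baz"}, giving 2 / 4 = 0.5.
assert jaccard_similarity("def foo(bar)", "def foo(baz)") == 0.5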
_shared_dataset = None


def _find_cluster_extremes_shared(cluster, jaccard_threshold):
    extremes = []
    for element1 in cluster:
        code1 = _shared_dataset[element1["base_index"]]["content"]
        for element2 in extremes:
            code2 = _shared_dataset[element2["base_index"]]["content"]
            if jaccard_similarity(code1, code2) >= jaccard_threshold:
                element2["copies"] += 1
                break
        else:
            element1["copies"] = 1
            extremes.append(element1)
    return extremes


def find_extremes(cluster_list, dataset, jaccard_threshold):
    global _shared_dataset
    _shared_dataset = dataset
    extremes_list = []
    f = partial(_find_cluster_extremes_shared, jaccard_threshold=jaccard_threshold)
    with mp.Pool() as pool:
        for extremes in tqdm(
            pool.imap_unordered(f, cluster_list),
            total=len(cluster_list),
        ):
            extremes_list.append(extremes)
    return extremes_list


def deduplicate_dataset(dataset, jaccard_threshold: float = 0.85) -> Tuple[Type[Dataset], List[List[Dict]]]:
    duplicate_clusters = make_duplicate_clusters(dataset, jaccard_threshold)
    duplicate_indices = {x["base_index"] for cluster in duplicate_clusters for x in cluster}
    extreme_dict = {}
    extremes_clusters = find_extremes(duplicate_clusters, dataset, jaccard_threshold)
    for extremes in extremes_clusters:
        for element in extremes:
            extreme_dict[element["base_index"]] = element
    remove_indices = duplicate_indices - set(extreme_dict.keys())
    ds_filter = dataset.filter(lambda x, idx: idx not in remove_indices, with_indices=True)

    # update duplicate_clusters
    for cluster in duplicate_clusters:
        for element in cluster:
            element["is_extreme"] = element["base_index"] in extreme_dict
            if element["is_extreme"]:
                element["copies"] = extreme_dict[element["base_index"]]["copies"]

    print(f"Original dataset size: {len(dataset)}")
    print(f"Number of duplicate clusters: {len(duplicate_clusters)}")
    print(f"Files in duplicate cluster: {len(duplicate_indices)}")
    print(f"Unique files in duplicate cluster: {len(extreme_dict)}")
    print(f"Filtered dataset size: {len(ds_filter)}")

    return ds_filter, duplicate_clusters
| 697
|
'''simple docstring'''
import numpy as np
from PIL import Image
def maxpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    """Apply max pooling with a square window of `size` and the given stride."""
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError("The input array is not a square matrix")
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0

    # compute the shape of the output matrix
    maxpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape maxpool_shape
    updated_arr = np.zeros((maxpool_shape, maxpool_shape))

    while i < arr.shape[0]:
        if i + size > arr.shape[0]:
            # if the end of the matrix is reached, break
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the maximum of the pooling window
            updated_arr[mat_i][mat_j] = np.max(arr[i : i + size, j : j + size])
            # shift the pooling window by stride of column pixels
            j += stride
            mat_j += 1
        # shift the pooling window by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0

    return updated_arr


def avgpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    """Apply average pooling with a square window of `size` and the given stride."""
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError("The input array is not a square matrix")
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0

    # compute the shape of the output matrix
    avgpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape avgpool_shape
    updated_arr = np.zeros((avgpool_shape, avgpool_shape))

    while i < arr.shape[0]:
        # if the end of the matrix is reached, break
        if i + size > arr.shape[0]:
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the average of the pooling window
            updated_arr[mat_i][mat_j] = int(np.average(arr[i : i + size, j : j + size]))
            # shift the pooling window by stride of column pixels
            j += stride
            mat_j += 1
        # shift the pooling window by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0

    return updated_arr
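# Small worked example (hand-checked): 2x2 max/average pooling with stride 2
# over a 4x4 ramp; the int() truncation in avgpooling floors the .5 averages.
_demo = [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]]
assert np.array_equal(maxpooling(_demo, size=2, stride=2), [[6, 8], [14, 16]])
assert np.array_equal(avgpooling(_demo, size=2, stride=2), [[3, 5], [11, 13]])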
# Main Function
if __name__ == "__main__":
    from doctest import testmod

    testmod(name="avgpooling", verbose=True)

    # Loading the image
    image = Image.open("path_to_image")

    # Converting the image to numpy array and maxpooling, displaying the result
    # Ensure that the image is a square matrix
    Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show()

    # Converting the image to numpy array and averagepooling, displaying the result
    # Ensure that the image is a square matrix
    Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
| 697
| 1
|
'''simple docstring'''
from __future__ import annotations
import math
from collections.abc import Callable
def line_length(
    fnc: Callable[[int | float], int | float],
    x_start: int | float,
    x_end: int | float,
    steps: int = 100,
) -> float:
    """Approximate the arc length of fnc on [x_start, x_end] using `steps` chords."""
    x1 = x_start
    fx1 = fnc(x_start)
    length = 0.0
    for _ in range(steps):
        # Approximates curve as a sequence of linear lines and sums their length
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2)
        length += math.hypot(x2 - x1, fx2 - fx1)
        # Increment step
        x1 = x2
        fx1 = fx2
    return length
if __name__ == "__main__":

    def f(x):
        return math.sin(10 * x)

    print("f(x) = sin(10 * x)")
    print("The length of the curve from x = -10 to x = 10 is:")
    i = 10
    while i <= 100000:
        print(f"With {i} steps: {line_length(f, -10, 10, i)}")
        i *= 10
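    # Sanity check: for the straight line f(x) = x on [0, 1] the chord
    # approximation is exactly sqrt(2), independent of the step count.
    assert abs(line_length(lambda x: x, 0, 1, 10) - math.hypot(1, 1)) < 1e-12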
| 688
|
'''simple docstring'''
import os
from typing import List, Optional, Union
from ...tokenization_utils import PreTrainedTokenizer
from ...tokenization_utils_base import AddedToken
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/esm2_t6_8M_UR50D': 'https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt',
'facebook/esm2_t12_35M_UR50D': 'https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/esm2_t6_8M_UR50D': 1_0_2_4,
'facebook/esm2_t12_35M_UR50D': 1_0_2_4,
}
def load_vocab_file(vocab_file):
    with open(vocab_file, "r") as f:
        lines = f.read().splitlines()
    return [line.strip() for line in lines]
class EsmTokenizer(PreTrainedTokenizer):
    """Constructs an ESM tokenizer."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        unk_token="<unk>",
        cls_token="<cls>",
        pad_token="<pad>",
        mask_token="<mask>",
        eos_token="<eos>",
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.all_tokens = load_vocab_file(vocab_file)
        self._id_to_token = dict(enumerate(self.all_tokens))
        self._token_to_id = {tok: ind for ind, tok in enumerate(self.all_tokens)}
        self.unk_token = unk_token
        self.cls_token = cls_token
        self.pad_token = pad_token
        self.mask_token = mask_token
        self.eos_token = eos_token
        self.unique_no_split_tokens = self.all_tokens
        self._create_trie(self.unique_no_split_tokens)

    def _convert_id_to_token(self, index: int) -> str:
        return self._id_to_token.get(index, self.unk_token)

    def _convert_token_to_id(self, token: str) -> int:
        return self._token_to_id.get(token, self._token_to_id.get(self.unk_token))

    def _tokenize(self, text, **kwargs):
        # ESM vocabularies are one token per residue, so a whitespace split suffices
        return text.split()

    def get_vocab_size(self, with_added_tokens=False):
        return len(self._id_to_token)

    def get_vocab(self):
        return {token: i for i, token in enumerate(self.all_tokens)}

    def token_to_id(self, token: str) -> int:
        return self._token_to_id.get(token, self._token_to_id.get(self.unk_token))

    def id_to_token(self, index: int) -> str:
        return self._id_to_token.get(index, self.unk_token)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        cls = [self.cls_token_id]
        sep = [self.eos_token_id]  # No sep token in ESM vocabulary
        if token_ids_1 is None:
            if self.eos_token_id is None:
                return cls + token_ids_0
            else:
                return cls + token_ids_0 + sep
        elif self.eos_token_id is None:
            raise ValueError("Cannot tokenize multiple sequences when EOS token is not set!")
        return cls + token_ids_0 + sep + token_ids_1 + sep  # Multiple inputs always have an EOS token

    def get_special_tokens_mask(
        self, token_ids_0: List, token_ids_1: Optional[List] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if token in self.all_special_ids else 0 for token in token_ids_0]
        mask = [1] + ([0] * len(token_ids_0)) + [1]
        if token_ids_1 is not None:
            mask += [0] * len(token_ids_1) + [1]
        return mask

    def save_vocabulary(self, save_directory, filename_prefix):
        vocab_file = os.path.join(save_directory, (filename_prefix + "-" if filename_prefix else "") + "vocab.txt")
        with open(vocab_file, "w") as f:
            f.write("\n".join(self.all_tokens))
        return (vocab_file,)

    @property
    def vocab_size(self) -> int:
        return self.get_vocab_size(with_added_tokens=False)

    def _add_tokens(self, new_tokens: Union[List[str], List[AddedToken]], special_tokens: bool = False) -> int:
        return super()._add_tokens(new_tokens, special_tokens=True)
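# Minimal usage sketch (not executed here; assumes network access, and
# "facebook/esm2_t6_8M_UR50D" hosts a vocab.txt on the Hub): ESM tokenizes a
# protein string one residue per token via the whitespace `_tokenize` above.
# tokenizer = EsmTokenizer.from_pretrained("facebook/esm2_t6_8M_UR50D")
# ids = tokenizer("M K T A Y I A K")["input_ids"]  # <cls> + 8 residues + <eos>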
| 688
| 1
|
'''simple docstring'''
import argparse
import re
import torch
from CLAP import create_model
from transformers import AutoFeatureExtractor, ClapConfig, ClapModel
KEYS_TO_MODIFY_MAPPING = {
'''text_branch''': '''text_model''',
'''audio_branch''': '''audio_model.audio_encoder''',
'''attn''': '''attention.self''',
'''self.proj''': '''output.dense''',
'''attention.self_mask''': '''attn_mask''',
'''mlp.fc1''': '''intermediate.dense''',
'''mlp.fc2''': '''output.dense''',
'''norm1''': '''layernorm_before''',
'''norm2''': '''layernorm_after''',
'''bn0''': '''batch_norm''',
}
processor = AutoFeatureExtractor.from_pretrained("laion/clap-htsat-unfused", truncation="rand_trunc")
def init_clap(checkpoint_path, enable_fusion=False):
    model, model_cfg = create_model(
        "HTSAT-tiny",
        "roberta",
        checkpoint_path,
        precision="fp32",
        device="cuda:0" if torch.cuda.is_available() else "cpu",
        enable_fusion=enable_fusion,
        fusion_type="aff_2d" if enable_fusion else None,
    )
    return model, model_cfg


def rename_state_dict(state_dict):
    model_state_dict = {}
    sequential_layers_pattern = r".*sequential.(\d+).*"
    text_projection_pattern = r".*_projection.(\d+).*"

    for key, value in state_dict.items():
        # check if any key needs to be modified
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify, new_key)

        if re.match(sequential_layers_pattern, key):
            # replace sequential layers with list
            sequential_layer = re.match(sequential_layers_pattern, key).group(1)
            key = key.replace(f"sequential.{sequential_layer}.", f"layers.{int(sequential_layer)//3}.linear.")
        elif re.match(text_projection_pattern, key):
            projecton_layer = int(re.match(text_projection_pattern, key).group(1))
            # Because in CLAP they use `nn.Sequential`...
            transformers_projection_layer = 1 if projecton_layer == 0 else 2
            key = key.replace(f"_projection.{projecton_layer}.", f"_projection.linear{transformers_projection_layer}.")

        if "audio" in key and "qkv" in key:
            # split qkv into query, key and value
            mixed_qkv = value
            qkv_dim = mixed_qkv.size(0) // 3
            query_layer = mixed_qkv[:qkv_dim]
            key_layer = mixed_qkv[qkv_dim : qkv_dim * 2]
            value_layer = mixed_qkv[qkv_dim * 2 :]
            model_state_dict[key.replace("qkv", "query")] = query_layer
            model_state_dict[key.replace("qkv", "key")] = key_layer
            model_state_dict[key.replace("qkv", "value")] = value_layer
        else:
            model_state_dict[key] = value

    return model_state_dict


def convert_clap_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path, enable_fusion=False):
    clap_model, model_cfg = init_clap(checkpoint_path, enable_fusion=enable_fusion)
    clap_model.eval()
    state_dict = clap_model.state_dict()
    state_dict = rename_state_dict(state_dict)

    transformers_config = ClapConfig()
    transformers_config.audio_config.enable_fusion = enable_fusion
    model = ClapModel(transformers_config)

    # ignore the spectrogram embedding layer
    model.load_state_dict(state_dict, strict=False)

    model.save_pretrained(pytorch_dump_folder_path)
    transformers_config.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument("--enable_fusion", action="store_true", help="Whether to enable fusion or not")
    args = parser.parse_args()

    convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
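    # Example invocation (hypothetical file paths):
    #   python convert_clap_original_pytorch_to_hf.py \
    #       --checkpoint_path ./clap.pt --pytorch_dump_folder_path ./clap-hf --enable_fusion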
| 156
|
'''simple docstring'''
from collections import defaultdict
def dfs(start: int) -> int:
    """Return the size of the subtree rooted at `start`, recording even cuts."""
    ret = 1
    visited[start] = True
    for v in tree[start]:
        if v not in visited:
            ret += dfs(v)
    if ret % 2 == 0:
        cuts.append(start)
    return ret


def even_tree():
    """An edge can be removed iff the subtree below it has an even vertex count."""
    dfs(1)
if __name__ == "__main__":
    n, m = 10, 9
    tree = defaultdict(list)
    visited: dict[int, bool] = {}
    cuts: list[int] = []
    count = 0
    edges = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)]
    for u, v in edges:
        tree[u].append(v)
        tree[v].append(u)
    even_tree()
    print(len(cuts) - 1)
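# For the sample tree above, the removable edges are the ones above the
# subtrees rooted at nodes 3 and 6 (sizes 2 and 4), so the script prints 2.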
| 156
| 1
|
"""simple docstring"""
from math import factorial, pi
def maclaurin_sin(theta: float, accuracy: int = 30) -> float:
    """Approximate sin(theta) by summing the first ``accuracy`` terms of its Maclaurin series."""
    if not isinstance(theta, (int, float)):
        raise ValueError("maclaurin_sin() requires either an int or float for theta")
    if not isinstance(accuracy, int) or accuracy <= 0:
        raise ValueError("maclaurin_sin() requires a positive int for accuracy")
    theta = float(theta)
    # Reduce theta into [-2*pi, 2*pi] so the truncated series converges quickly.
    div = theta // (2 * pi)
    theta -= 2 * div * pi
    return sum(
        (-1) ** r * theta ** (2 * r + 1) / factorial(2 * r + 1) for r in range(accuracy)
    )


def maclaurin_cos(theta: float, accuracy: int = 30) -> float:
    """Approximate cos(theta) by summing the first ``accuracy`` terms of its Maclaurin series."""
    if not isinstance(theta, (int, float)):
        raise ValueError("maclaurin_cos() requires either an int or float for theta")
    if not isinstance(accuracy, int) or accuracy <= 0:
        raise ValueError("maclaurin_cos() requires a positive int for accuracy")
    theta = float(theta)
    # Reduce theta into [-2*pi, 2*pi] so the truncated series converges quickly.
    div = theta // (2 * pi)
    theta -= 2 * div * pi
    return sum((-1) ** r * theta ** (2 * r) / factorial(2 * r) for r in range(accuracy))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(maclaurin_sin(10))
    print(maclaurin_sin(-10))
    print(maclaurin_sin(10, 15))
    print(maclaurin_sin(-10, 15))
    print(maclaurin_cos(5))
    print(maclaurin_cos(-5))
    print(maclaurin_cos(10, 15))
    print(maclaurin_cos(-10, 15))
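# Sanity check: with the default accuracy of 30 terms the series tracks the
# math module closely, e.g. maclaurin_sin(10) ~= -0.5440211 and
# maclaurin_cos(5) ~= 0.2836622.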
| 7
|
import unittest
from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/spiece.model")
@require_sentencepiece
@require_tokenizers
class DebertaV2TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = DebertaVaTokenizer
    rust_tokenizer_class = DebertaVaTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
def _SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
UpperCamelCase = DebertaVaTokenizer(_SCREAMING_SNAKE_CASE , unk_token='<unk>' )
tokenizer.save_pretrained(self.tmpdirname )
def _SCREAMING_SNAKE_CASE ( self : List[Any] , _SCREAMING_SNAKE_CASE : Optional[Any] ):
"""simple docstring"""
UpperCamelCase = 'this is a test'
UpperCamelCase = 'this is a test'
return input_text, output_text
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
UpperCamelCase = '<pad>'
UpperCamelCase = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
def _SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
UpperCamelCase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<pad>' )
self.assertEqual(vocab_keys[1] , '<unk>' )
self.assertEqual(vocab_keys[-1] , '[PAD]' )
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , 3_0001 )
def _SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 3_0000 )
def _SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
UpperCamelCase = ' \tHeLLo!how \n Are yoU? '
UpperCamelCase = ['▁hello', '!', 'how', '▁are', '▁you', '?']
# fmt: on
UpperCamelCase = DebertaVaTokenizer(_SCREAMING_SNAKE_CASE , do_lower_case=_SCREAMING_SNAKE_CASE )
UpperCamelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE ) )
self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
UpperCamelCase = DebertaVaTokenizerFast(_SCREAMING_SNAKE_CASE , do_lower_case=_SCREAMING_SNAKE_CASE )
UpperCamelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE ) )
self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@unittest.skip('There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.' )
def _SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
pass
@unittest.skip('There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.' )
def _SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
pass
def _SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
UpperCamelCase = 'I was born in 92000, and this is falsé.'
UpperCamelCase = ['▁', '<unk>', '▁was', '▁born', '▁in', '▁9', '2000', '▁', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '▁', '.', ]
# fmt: on
UpperCamelCase = DebertaVaTokenizer(_SCREAMING_SNAKE_CASE , split_by_punct=_SCREAMING_SNAKE_CASE )
UpperCamelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE ) )
self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
UpperCamelCase = DebertaVaTokenizerFast(_SCREAMING_SNAKE_CASE , split_by_punct=_SCREAMING_SNAKE_CASE )
UpperCamelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE ) )
self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def _SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
UpperCamelCase = 'I was born in 92000, and this is falsé.'
UpperCamelCase = ['▁i', '▁was', '▁born', '▁in', '▁9', '2000', '▁', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '▁', '.', ]
# fmt: on
UpperCamelCase = DebertaVaTokenizer(_SCREAMING_SNAKE_CASE , do_lower_case=_SCREAMING_SNAKE_CASE , split_by_punct=_SCREAMING_SNAKE_CASE )
UpperCamelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE ) )
self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
UpperCamelCase = DebertaVaTokenizerFast(_SCREAMING_SNAKE_CASE , do_lower_case=_SCREAMING_SNAKE_CASE , split_by_punct=_SCREAMING_SNAKE_CASE )
UpperCamelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE ) )
self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def _SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
UpperCamelCase = 'I was born in 92000, and this is falsé.'
UpperCamelCase = ['▁i', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '.', ]
# fmt: on
UpperCamelCase = DebertaVaTokenizer(_SCREAMING_SNAKE_CASE , do_lower_case=_SCREAMING_SNAKE_CASE , split_by_punct=_SCREAMING_SNAKE_CASE )
UpperCamelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE ) )
self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
UpperCamelCase = DebertaVaTokenizerFast(_SCREAMING_SNAKE_CASE , do_lower_case=_SCREAMING_SNAKE_CASE , split_by_punct=_SCREAMING_SNAKE_CASE )
UpperCamelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE ) )
self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ):
"""simple docstring"""
UpperCamelCase = 'I was born in 92000, and this is falsé.'
UpperCamelCase = ['▁', '<unk>', '▁was', '▁born', '▁in', '▁9', '2000', '▁', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '▁', '.', ]
# fmt: on
UpperCamelCase = DebertaVaTokenizer(_SCREAMING_SNAKE_CASE , do_lower_case=_SCREAMING_SNAKE_CASE , split_by_punct=_SCREAMING_SNAKE_CASE )
UpperCamelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE ) )
self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
UpperCamelCase = DebertaVaTokenizerFast(_SCREAMING_SNAKE_CASE , do_lower_case=_SCREAMING_SNAKE_CASE , split_by_punct=_SCREAMING_SNAKE_CASE )
UpperCamelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE ) )
self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def _SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
UpperCamelCase = ' \tHeLLo!how \n Are yoU? '
UpperCamelCase = ['▁', '<unk>', 'e', '<unk>', 'o', '!', 'how', '▁', '<unk>', 're', '▁yo', '<unk>', '?']
# fmt: on
UpperCamelCase = DebertaVaTokenizer(_SCREAMING_SNAKE_CASE , do_lower_case=_SCREAMING_SNAKE_CASE , split_by_punct=_SCREAMING_SNAKE_CASE )
UpperCamelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE ) )
self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
UpperCamelCase = DebertaVaTokenizerFast(_SCREAMING_SNAKE_CASE , do_lower_case=_SCREAMING_SNAKE_CASE , split_by_punct=_SCREAMING_SNAKE_CASE )
UpperCamelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE ) )
self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
UpperCamelCase = self.get_tokenizer()
UpperCamelCase = self.get_rust_tokenizer()
UpperCamelCase = 'I was born in 92000, and this is falsé.'
UpperCamelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE ) )
UpperCamelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE ) )
self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
UpperCamelCase = tokenizer.encode(_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE )
UpperCamelCase = rust_tokenizer.encode(_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE )
self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
UpperCamelCase = self.get_rust_tokenizer()
UpperCamelCase = tokenizer.encode(_SCREAMING_SNAKE_CASE )
UpperCamelCase = rust_tokenizer.encode(_SCREAMING_SNAKE_CASE )
self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
UpperCamelCase = 'This is a test'
UpperCamelCase = [13, 1, 4398, 25, 21, 1289]
UpperCamelCase = ['▁', 'T', 'his', '▁is', '▁a', '▁test']
UpperCamelCase = ['▁', '<unk>', 'his', '▁is', '▁a', '▁test']
UpperCamelCase = DebertaVaTokenizer(_SCREAMING_SNAKE_CASE , keep_accents=_SCREAMING_SNAKE_CASE )
UpperCamelCase = DebertaVaTokenizerFast(_SCREAMING_SNAKE_CASE , keep_accents=_SCREAMING_SNAKE_CASE )
UpperCamelCase = tokenizer.encode(_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE )
self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
UpperCamelCase = tokenizer.tokenize(_SCREAMING_SNAKE_CASE )
self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
UpperCamelCase = tokenizer.convert_ids_to_tokens(_SCREAMING_SNAKE_CASE )
self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
UpperCamelCase = rust_tokenizer.encode(_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE )
self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
UpperCamelCase = rust_tokenizer.tokenize(_SCREAMING_SNAKE_CASE )
self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
UpperCamelCase = rust_tokenizer.convert_ids_to_tokens(_SCREAMING_SNAKE_CASE )
self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# fmt: off
UpperCamelCase = 'I was born in 92000, and this is falsé.'
UpperCamelCase = [13, 1, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9]
UpperCamelCase = ['▁', 'I', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', 'é', '.', ]
UpperCamelCase = ['▁', '<unk>', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '.', ]
# fmt: on
UpperCamelCase = tokenizer.encode(_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE )
self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
UpperCamelCase = tokenizer.tokenize(_SCREAMING_SNAKE_CASE )
self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
UpperCamelCase = tokenizer.convert_ids_to_tokens(_SCREAMING_SNAKE_CASE )
self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
UpperCamelCase = rust_tokenizer.encode(_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE )
self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
UpperCamelCase = rust_tokenizer.tokenize(_SCREAMING_SNAKE_CASE )
self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
UpperCamelCase = rust_tokenizer.convert_ids_to_tokens(_SCREAMING_SNAKE_CASE )
self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def _SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
UpperCamelCase = DebertaVaTokenizer(_SCREAMING_SNAKE_CASE )
UpperCamelCase = tokenizer.encode('sequence builders' )
UpperCamelCase = tokenizer.encode('multi-sequence build' )
UpperCamelCase = tokenizer.build_inputs_with_special_tokens(_SCREAMING_SNAKE_CASE )
UpperCamelCase = tokenizer.build_inputs_with_special_tokens(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] , _SCREAMING_SNAKE_CASE )
self.assertEqual(
[tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [tokenizer.sep_token_id] , _SCREAMING_SNAKE_CASE , )
@slow
def _SCREAMING_SNAKE_CASE ( self : List[Any] ):
"""simple docstring"""
UpperCamelCase = {'input_ids': [[1, 3_9867, 36, 1_9390, 486, 27, 3_5052, 8_1436, 18, 6_0685, 1225, 7, 3_5052, 8_1436, 18, 9367, 1_6899, 18, 1_5937, 53, 594, 773, 18, 1_6287, 3_0465, 36, 1_5937, 6, 4_1139, 38, 3_6979, 6_0763, 191, 6, 3_4132, 99, 6, 5_0538, 390, 4_3230, 6, 3_4132, 2779, 2_0850, 14, 699, 1072, 1194, 36, 382, 1_0901, 53, 7, 699, 1072, 2084, 36, 2_0422, 630, 53, 19, 105, 3049, 1896, 1053, 1_6899, 1506, 11, 3_7978, 4243, 7, 1237, 3_1869, 200, 1_6566, 654, 6, 3_5052, 8_1436, 7, 5_5630, 1_3593, 4, 2], [1, 26, 1_5011, 13, 667, 8, 1053, 18, 2_3611, 1237, 7_2356, 1_2820, 34, 10_4134, 1209, 35, 1_3313, 6627, 21, 202, 347, 7, 164, 2399, 11, 46, 4485, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 1232, 2864, 1_5785, 1_4951, 105, 5, 8581, 1250, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_SCREAMING_SNAKE_CASE , model_name='microsoft/deberta-v2-xlarge' , revision='ad6e42c1532ddf3a15c39246b63f5559d558b670' , )
| 280
| 0
|
'''simple docstring'''
import json
import os
from typing import Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
'vocab_file': 'vocab.json',
'merges_file': 'merges.txt',
}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {'ctrl': 'https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-vocab.json'},
'merges_file': {'ctrl': 'https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-merges.txt'},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'ctrl': 256,
}
CONTROL_CODES = {
'Pregnancy': 168_629,
'Christianity': 7_675,
'Explain': 106_423,
'Fitness': 63_440,
'Saving': 63_163,
'Ask': 27_171,
'Ass': 95_985,
'Joke': 163_509,
'Questions': 45_622,
'Thoughts': 49_605,
'Retail': 52_342,
'Feminism': 164_338,
'Writing': 11_992,
'Atheism': 192_263,
'Netflix': 48_616,
'Computing': 39_639,
'Opinion': 43_213,
'Alone': 44_967,
'Funny': 58_917,
'Gaming': 40_358,
'Human': 4_088,
'India': 1_331,
'Joker': 77_138,
'Diet': 36_206,
'Legal': 11_859,
'Norman': 4_939,
'Tip': 72_689,
'Weight': 52_343,
'Movies': 46_273,
'Running': 23_425,
'Science': 2_090,
'Horror': 37_793,
'Confession': 60_572,
'Finance': 12_250,
'Politics': 16_360,
'Scary': 191_985,
'Support': 12_654,
'Technologies': 32_516,
'Teenage': 66_160,
'Event': 32_769,
'Learned': 67_460,
'Notion': 182_770,
'Wikipedia': 37_583,
'Books': 6_665,
'Extract': 76_050,
'Confessions': 102_701,
'Conspiracy': 75_932,
'Links': 63_674,
'Narcissus': 150_425,
'Relationship': 54_766,
'Relationships': 134_796,
'Reviews': 41_671,
'News': 4_256,
'Translation': 26_820,
'multilingual': 128_406,
}
def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word, given as a tuple of symbols."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
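# For example, get_pairs(("h", "e", "l", "l", "o")) returns
# {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}.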
class CTRLTokenizer(PreTrainedTokenizer):
    """Word-level BPE tokenizer for CTRL, using "@@" continuation markers between sub-word units."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    control_codes = CONTROL_CODES

    def __init__(self, vocab_file, merges_file, unk_token="<unk>", **kwargs):
        super().__init__(unk_token=unk_token, **kwargs)
        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            merges = merges_handle.read().split("\n")[1:-1]
        merges = [tuple(merge.split()) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}

    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
        pairs = get_pairs(word)
        if not pairs:
            return token
        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j
                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = "@@ ".join(word)
        word = word[:-4]
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        split_tokens = []
        words = re.findall(r"\S+\n?", text)
        for token in words:
            split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens):
        out_string = " ".join(tokens).replace("@@ ", "").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )
        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1
        return vocab_file, merge_file
# def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True):
# filtered_tokens = ' '.join(self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens))
# tokens_generated_so_far = re.sub('(@@ )', '', string=filtered_tokens)
# tokens_generated_so_far = re.sub('(@@ ?$)', '', string=tokens_generated_so_far)
# return ''.join(tokens_generated_so_far)
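# Usage sketch (the vocab/merges file paths are hypothetical):
#   tokenizer = CTRLTokenizer("ctrl-vocab.json", "ctrl-merges.txt")
#   tokens = tokenizer._tokenize("Links Hello world")
#   text = tokenizer.convert_tokens_to_string(tokens)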
| 710
|
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/pix2struct-textcaps-base": (
        "https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json"
    ),
}
class Pix2StructTextConfig(PretrainedConfig):
    model_type = "pix2struct_text_model"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "hidden_size",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        vocab_size=50244,
        hidden_size=768,
        d_kv=64,
        d_ff=2048,
        num_layers=12,
        num_heads=12,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        dense_act_fn="gelu_new",
        decoder_start_token_id=0,
        use_cache=False,
        pad_token_id=0,
        eos_token_id=1,
        tie_word_embeddings=False,
        is_decoder=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.use_cache = use_cache
        self.eos_token_id = eos_token_id
        self.decoder_start_token_id = decoder_start_token_id
        # for backwards compatibility
        self.dense_act_fn = dense_act_fn
        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            tie_word_embeddings=tie_word_embeddings,
            is_decoder=is_decoder,
            **kwargs,
        )

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs):
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the text config dict if we are loading from Pix2StructConfig
        if config_dict.get("model_type") == "pix2struct":
            config_dict = config_dict["text_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )
        return cls.from_dict(config_dict, **kwargs)
class Pix2StructVisionConfig(PretrainedConfig):
    model_type = "pix2struct_vision_model"

    def __init__(
        self,
        hidden_size=768,
        patch_embed_hidden_size=768,
        d_ff=2048,
        d_kv=64,
        num_hidden_layers=12,
        num_attention_heads=12,
        dense_act_fn="gelu_new",
        layer_norm_eps=1e-6,
        dropout_rate=0.0,
        attention_dropout=0.0,
        initializer_range=1e-10,
        initializer_factor=1.0,
        seq_len=4096,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.patch_embed_hidden_size = patch_embed_hidden_size
        self.d_ff = d_ff
        self.dropout_rate = dropout_rate
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.dense_act_fn = dense_act_fn
        self.seq_len = seq_len
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.d_kv = d_kv

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs):
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the vision config dict if we are loading from Pix2StructConfig
        if config_dict.get("model_type") == "pix2struct":
            config_dict = config_dict["vision_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )
        return cls.from_dict(config_dict, **kwargs)
class Pix2StructConfig(PretrainedConfig):
    model_type = "pix2struct"
    is_composition = True

    def __init__(
        self,
        text_config=None,
        vision_config=None,
        initializer_factor=1.0,
        initializer_range=0.02,
        is_vqa=False,
        tie_word_embeddings=False,
        is_encoder_decoder=True,
        **kwargs,
    ):
        super().__init__(tie_word_embeddings=tie_word_embeddings, is_encoder_decoder=is_encoder_decoder, **kwargs)
        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the Pix2StructTextConfig with default values.")
        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. Initializing the Pix2StructVisionConfig with default values.")
        self.text_config = Pix2StructTextConfig(**text_config)
        self.vision_config = Pix2StructVisionConfig(**vision_config)
        self.decoder_start_token_id = self.text_config.decoder_start_token_id
        self.pad_token_id = self.text_config.pad_token_id
        self.eos_token_id = self.text_config.eos_token_id
        self.initializer_factor = initializer_factor
        self.initializer_range = initializer_range
        self.text_config.initializer_range = self.initializer_range
        self.vision_config.initializer_range = self.initializer_range
        self.is_vqa = is_vqa

    @classmethod
    def from_text_vision_configs(cls, text_config: Pix2StructTextConfig, vision_config: Pix2StructVisionConfig, **kwargs):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
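# Usage sketch: a composite Pix2Struct config can be assembled from its parts.
#   text_config = Pix2StructTextConfig()
#   vision_config = Pix2StructVisionConfig()
#   config = Pix2StructConfig.from_text_vision_configs(text_config, vision_config)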
| 267
| 0
|
"""simple docstring"""
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import UperNetForSemanticSegmentation
from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UperNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        num_channels=3,
        num_stages=4,
        hidden_sizes=[10, 20, 30, 40],
        depths=[2, 2, 3, 2],
        is_training=True,
        use_labels=True,
        intermediate_size=37,
        hidden_act="gelu",
        type_sequence_label_size=10,
        initializer_range=0.02,
        out_features=["stage2", "stage3", "stage4"],
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = num_stages
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels

    def get_backbone_config(self):
        return ConvNextConfig(
            num_channels=self.num_channels,
            num_stages=self.num_stages,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            is_training=self.is_training,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            out_features=self.out_features,
        )

    def get_config(self):
        return UperNetConfig(
            backbone_config=self.get_backbone_config(),
            hidden_size=512,
            pool_scales=[1, 2, 3, 6],
            use_auxiliary_head=True,
            auxiliary_loss_weight=0.4,
            auxiliary_in_channels=40,
            auxiliary_channels=256,
            auxiliary_num_convs=1,
            auxiliary_concat_input=False,
            loss_ignore_index=255,
            num_labels=self.num_labels,
        )

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels):
        model = UperNetForSemanticSegmentation(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size)
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class UperNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (UperNetForSemanticSegmentation,) if is_torch_available() else ()
    pipeline_model_mapping = {"image-segmentation": UperNetForSemanticSegmentation} if is_torch_available() else {}
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_torchscript = False
    has_attentions = False

    def setUp(self):
        self.model_tester = UperNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=UperNetConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)
@unittest.skip(reason='UperNet does not use inputs_embeds' )
def _UpperCAmelCase ( self ) -> str:
'''simple docstring'''
pass
@unittest.skip(reason='UperNet does not support input and output embeddings' )
def _UpperCAmelCase ( self ) -> List[str]:
'''simple docstring'''
pass
@unittest.skip(reason='UperNet does not have a base model' )
def _UpperCAmelCase ( self ) -> List[Any]:
'''simple docstring'''
pass
@unittest.skip(reason='UperNet does not have a base model' )
def _UpperCAmelCase ( self ) -> Tuple:
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(reason='UperNet has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`' )
def _UpperCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def _UpperCAmelCase ( self ) -> str:
'''simple docstring'''
pass
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)
            # ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config)
        configs_no_init.backbone_config = _config_zero_init(configs_no_init.backbone_config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
@unittest.skip(reason='UperNet does not have tied weights' )
def _UpperCAmelCase ( self ) -> List[str]:
'''simple docstring'''
pass
@slow
    def test_model_from_pretrained(self):
        for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = UperNetForSemanticSegmentation.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    filepath = hf_hub_download(
        repo_id='hf-internal-testing/fixtures_ade20k', repo_type='dataset', filename='ADE_val_00000001.jpg'
    )
    image = Image.open(filepath).convert('RGB')
    return image
@require_torch
@require_vision
@slow
class UperNetModelIntegrationTest(unittest.TestCase):
    def test_inference_swin_backbone(self):
        processor = AutoImageProcessor.from_pretrained('openmmlab/upernet-swin-tiny')
        model = UperNetForSemanticSegmentation.from_pretrained('openmmlab/upernet-swin-tiny').to(torch_device)
        image = prepare_img()
        inputs = processor(images=image, return_tensors='pt').to(torch_device)
        with torch.no_grad():
            outputs = model(**inputs)
        expected_shape = torch.Size((1, model.config.num_labels, 512, 512))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor(
            [[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4))

    def test_inference_convnext_backbone(self):
        processor = AutoImageProcessor.from_pretrained('openmmlab/upernet-convnext-tiny')
        model = UperNetForSemanticSegmentation.from_pretrained('openmmlab/upernet-convnext-tiny').to(torch_device)
        image = prepare_img()
        inputs = processor(images=image, return_tensors='pt').to(torch_device)
        with torch.no_grad():
            outputs = model(**inputs)
        expected_shape = torch.Size((1, model.config.num_labels, 512, 512))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor(
            [[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4))
| 425
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_mctct": ["MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MCTCTConfig"],
"feature_extraction_mctct": ["MCTCTFeatureExtractor"],
"processing_mctct": ["MCTCTProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mctct"] = [
"MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST",
"MCTCTForCTC",
"MCTCTModel",
"MCTCTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
from .feature_extraction_mctct import MCTCTFeatureExtractor
from .processing_mctct import MCTCTProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
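# Note: with this _LazyModule pattern the package import stays cheap; the
# torch-backed modeling submodule is only imported the first time one of the
# names registered in `_import_structure` is actually accessed.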
| 425
| 1
|
TEXT_TO_IMAGE_PARAMS = frozenset(
    [
        "prompt",
        "height",
        "width",
        "guidance_scale",
        "negative_prompt",
        "prompt_embeds",
        "negative_prompt_embeds",
        "cross_attention_kwargs",
    ]
)
TEXT_TO_IMAGE_BATCH_PARAMS = frozenset(["prompt", "negative_prompt"])
TEXT_TO_IMAGE_IMAGE_PARAMS = frozenset([])
IMAGE_TO_IMAGE_IMAGE_PARAMS = frozenset(["image"])
IMAGE_VARIATION_PARAMS = frozenset(
    [
        "image",
        "height",
        "width",
        "guidance_scale",
    ]
)
IMAGE_VARIATION_BATCH_PARAMS = frozenset(["image"])
TEXT_GUIDED_IMAGE_VARIATION_PARAMS = frozenset(
    [
        "prompt",
        "image",
        "height",
        "width",
        "guidance_scale",
        "negative_prompt",
        "prompt_embeds",
        "negative_prompt_embeds",
    ]
)
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS = frozenset(["prompt", "image", "negative_prompt"])
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS = frozenset(
    [
        # Text guided image variation with an image mask
        "prompt",
        "image",
        "mask_image",
        "height",
        "width",
        "guidance_scale",
        "negative_prompt",
        "prompt_embeds",
        "negative_prompt_embeds",
    ]
)
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS = frozenset(["prompt", "image", "mask_image", "negative_prompt"])
IMAGE_INPAINTING_PARAMS = frozenset(
    [
        # image variation with an image mask
        "image",
        "mask_image",
        "height",
        "width",
        "guidance_scale",
    ]
)
IMAGE_INPAINTING_BATCH_PARAMS = frozenset(["image", "mask_image"])
IMAGE_GUIDED_IMAGE_INPAINTING_PARAMS = frozenset(
    [
        "example_image",
        "image",
        "mask_image",
        "height",
        "width",
        "guidance_scale",
    ]
)
IMAGE_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS = frozenset(["example_image", "image", "mask_image"])
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS = frozenset(["class_labels"])
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS = frozenset(["class_labels"])
UNCONDITIONAL_IMAGE_GENERATION_PARAMS = frozenset(["batch_size"])
UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS = frozenset([])
UNCONDITIONAL_AUDIO_GENERATION_PARAMS = frozenset(["batch_size"])
UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS = frozenset([])
TEXT_TO_AUDIO_PARAMS = frozenset(
    [
        "prompt",
        "audio_length_in_s",
        "guidance_scale",
        "negative_prompt",
        "prompt_embeds",
        "negative_prompt_embeds",
        "cross_attention_kwargs",
    ]
)
TEXT_TO_AUDIO_BATCH_PARAMS = frozenset(["prompt", "negative_prompt"])
TOKENS_TO_AUDIO_GENERATION_PARAMS = frozenset(["input_tokens"])
TOKENS_TO_AUDIO_GENERATION_BATCH_PARAMS = frozenset(["input_tokens"])
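# Convention used above: each *_PARAMS frozenset lists the pipeline-specific
# __call__ arguments a pipeline of that kind is expected to accept, and the
# matching *_BATCH_PARAMS frozenset lists the arguments that are batched in
# batched-inference tests.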
| 634
|
import unittest
import numpy as np
def schur_complement(mat_a, mat_b, mat_c, pseudo_inv=None) -> np.ndarray:
    """Compute the Schur complement of the block matrix [[A, B], [B^T, C]]."""
    shape_a = np.shape(mat_a)
    shape_b = np.shape(mat_b)
    shape_c = np.shape(mat_c)
    if shape_a[0] != shape_b[0]:
        msg = (
            "Expected the same number of rows for A and B. "
            f"Instead found A of size {shape_a} and B of size {shape_b}"
        )
        raise ValueError(msg)
    if shape_b[1] != shape_c[1]:
        msg = (
            "Expected the same number of columns for B and C. "
            f"Instead found B of size {shape_b} and C of size {shape_c}"
        )
        raise ValueError(msg)
    a_inv = pseudo_inv
    if a_inv is None:
        try:
            a_inv = np.linalg.inv(mat_a)
        except np.linalg.LinAlgError:
            raise ValueError(
                "Input matrix A is not invertible. Cannot compute Schur complement."
            )
    return mat_c - mat_b.T @ a_inv @ mat_b
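# For the block matrix X = [[A, B], [B^T, C]] with A invertible, the Schur
# complement S = C - B^T A^{-1} B satisfies det(X) = det(A) * det(S); the
# first unit test below checks exactly this identity.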
class TestSchurComplement(unittest.TestCase):
    """Unit tests for schur_complement."""

    def test_schur_complement(self) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1], [6, 3]])
        s = schur_complement(a, b, c)
        input_matrix = np.block([[a, b], [b.T, c]])
        det_x = np.linalg.det(input_matrix)
        det_a = np.linalg.det(a)
        det_s = np.linalg.det(s)
        self.assertAlmostEqual(det_x, det_a * det_s)

    def test_improper_a_b_dimensions(self) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1], [6, 3]])
        with self.assertRaises(ValueError):
            schur_complement(a, b, c)

    def test_improper_b_c_dimensions(self) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1, 3], [6, 3, 5]])
        with self.assertRaises(ValueError):
            schur_complement(a, b, c)
if __name__ == "__main__":
import doctest
doctest.testmod()
unittest.main()
| 634
| 1
|
"""simple docstring"""
from datetime import datetime
import requests
from bs4 import BeautifulSoup

if __name__ == "__main__":
    url = input("Enter image url: ").strip()
    print(f"Downloading image from {url} ...")
    soup = BeautifulSoup(requests.get(url).content, "html.parser")
    # The image URL is in the content field of the first meta tag with property og:image
    image_url = soup.find("meta", {"property": "og:image"})["content"]
    image_data = requests.get(image_url).content
    file_name = f"{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg"
    with open(file_name, "wb") as fp:
        fp.write(image_data)
    print(f"Done. Image saved to disk as {file_name}.")
| 465
|
"""simple docstring"""
from __future__ import annotations
from statistics import mean
def calculate_waitingtime(arrival_time, burst_time, no_of_processes):
    """Compute per-process waiting times under non-preemptive shortest-job-first scheduling."""
    waiting_time = [0] * no_of_processes
    remaining_time = [0] * no_of_processes
    # Initialize remaining_time to burst_time.
    for i in range(no_of_processes):
        remaining_time[i] = burst_time[i]
    ready_process = []
    completed = 0
    total_time = 0
    # While processes are not completed, a process whose arrival time has passed
    # and that has remaining execution time is put into ready_process.
    # The shortest process in ready_process, target_process, is executed.
    while completed != no_of_processes:
        ready_process = []
        target_process = -1
        for i in range(no_of_processes):
            if (arrival_time[i] <= total_time) and (remaining_time[i] > 0):
                ready_process.append(i)
        if len(ready_process) > 0:
            target_process = ready_process[0]
            for i in ready_process:
                if remaining_time[i] < remaining_time[target_process]:
                    target_process = i
            total_time += burst_time[target_process]
            completed += 1
            remaining_time[target_process] = 0
            waiting_time[target_process] = (
                total_time - arrival_time[target_process] - burst_time[target_process]
            )
        else:
            total_time += 1
    return waiting_time


def calculate_turnaroundtime(burst_time, no_of_processes, waiting_time):
    """Turnaround time is burst time plus waiting time."""
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time
if __name__ == "__main__":
    print("[TEST CASE 01]")
    no_of_processes = 4
    burst_time = [2, 5, 3, 7]
    arrival_time = [0, 0, 0, 0]
    waiting_time = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
    turn_around_time = calculate_turnaroundtime(burst_time, no_of_processes, waiting_time)
    # Printing the Result
    print("PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time")
    for i, process_id in enumerate(list(range(1, 5))):
        print(
            f"{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t"
            f"{waiting_time[i]}\t\t\t\t{turn_around_time[i]}"
        )
    print(f"\nAverage waiting time = {mean(waiting_time):.5f}")
    print(f"Average turnaround time = {mean(turn_around_time):.5f}")
| 465
| 1
|
'''simple docstring'''
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
logger = logging.get_logger(__name__)
FLAX_MODEL_MAPPING_NAMES = OrderedDict(
[
# Base model mapping
('''albert''', '''FlaxAlbertModel'''),
('''bart''', '''FlaxBartModel'''),
('''beit''', '''FlaxBeitModel'''),
('''bert''', '''FlaxBertModel'''),
('''big_bird''', '''FlaxBigBirdModel'''),
('''blenderbot''', '''FlaxBlenderbotModel'''),
('''blenderbot-small''', '''FlaxBlenderbotSmallModel'''),
('''clip''', '''FlaxCLIPModel'''),
('''distilbert''', '''FlaxDistilBertModel'''),
('''electra''', '''FlaxElectraModel'''),
('''gpt-sw3''', '''FlaxGPT2Model'''),
('''gpt2''', '''FlaxGPT2Model'''),
('''gpt_neo''', '''FlaxGPTNeoModel'''),
('''gptj''', '''FlaxGPTJModel'''),
('''longt5''', '''FlaxLongT5Model'''),
('''marian''', '''FlaxMarianModel'''),
('''mbart''', '''FlaxMBartModel'''),
('''mt5''', '''FlaxMT5Model'''),
('''opt''', '''FlaxOPTModel'''),
('''pegasus''', '''FlaxPegasusModel'''),
('''regnet''', '''FlaxRegNetModel'''),
('''resnet''', '''FlaxResNetModel'''),
('''roberta''', '''FlaxRobertaModel'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormModel'''),
('''roformer''', '''FlaxRoFormerModel'''),
('''t5''', '''FlaxT5Model'''),
('''vision-text-dual-encoder''', '''FlaxVisionTextDualEncoderModel'''),
('''vit''', '''FlaxViTModel'''),
('''wav2vec2''', '''FlaxWav2Vec2Model'''),
('''whisper''', '''FlaxWhisperModel'''),
('''xglm''', '''FlaxXGLMModel'''),
('''xlm-roberta''', '''FlaxXLMRobertaModel'''),
]
)
FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES = OrderedDict(
[
# Model for pre-training mapping
('''albert''', '''FlaxAlbertForPreTraining'''),
('''bart''', '''FlaxBartForConditionalGeneration'''),
('''bert''', '''FlaxBertForPreTraining'''),
('''big_bird''', '''FlaxBigBirdForPreTraining'''),
('''electra''', '''FlaxElectraForPreTraining'''),
('''longt5''', '''FlaxLongT5ForConditionalGeneration'''),
('''mbart''', '''FlaxMBartForConditionalGeneration'''),
('''mt5''', '''FlaxMT5ForConditionalGeneration'''),
('''roberta''', '''FlaxRobertaForMaskedLM'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForMaskedLM'''),
('''roformer''', '''FlaxRoFormerForMaskedLM'''),
('''t5''', '''FlaxT5ForConditionalGeneration'''),
('''wav2vec2''', '''FlaxWav2Vec2ForPreTraining'''),
('''whisper''', '''FlaxWhisperForConditionalGeneration'''),
('''xlm-roberta''', '''FlaxXLMRobertaForMaskedLM'''),
]
)
FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Masked LM mapping
('''albert''', '''FlaxAlbertForMaskedLM'''),
('''bart''', '''FlaxBartForConditionalGeneration'''),
('''bert''', '''FlaxBertForMaskedLM'''),
('''big_bird''', '''FlaxBigBirdForMaskedLM'''),
('''distilbert''', '''FlaxDistilBertForMaskedLM'''),
('''electra''', '''FlaxElectraForMaskedLM'''),
('''mbart''', '''FlaxMBartForConditionalGeneration'''),
('''roberta''', '''FlaxRobertaForMaskedLM'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForMaskedLM'''),
('''roformer''', '''FlaxRoFormerForMaskedLM'''),
('''xlm-roberta''', '''FlaxXLMRobertaForMaskedLM'''),
]
)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
('''bart''', '''FlaxBartForConditionalGeneration'''),
('''blenderbot''', '''FlaxBlenderbotForConditionalGeneration'''),
('''blenderbot-small''', '''FlaxBlenderbotSmallForConditionalGeneration'''),
('''encoder-decoder''', '''FlaxEncoderDecoderModel'''),
('''longt5''', '''FlaxLongT5ForConditionalGeneration'''),
('''marian''', '''FlaxMarianMTModel'''),
('''mbart''', '''FlaxMBartForConditionalGeneration'''),
('''mt5''', '''FlaxMT5ForConditionalGeneration'''),
('''pegasus''', '''FlaxPegasusForConditionalGeneration'''),
('''t5''', '''FlaxT5ForConditionalGeneration'''),
]
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Image-classsification
('''beit''', '''FlaxBeitForImageClassification'''),
('''regnet''', '''FlaxRegNetForImageClassification'''),
('''resnet''', '''FlaxResNetForImageClassification'''),
('''vit''', '''FlaxViTForImageClassification'''),
]
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES = OrderedDict(
[
('''vision-encoder-decoder''', '''FlaxVisionEncoderDecoderModel'''),
]
)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Causal LM mapping
('''bart''', '''FlaxBartForCausalLM'''),
('''bert''', '''FlaxBertForCausalLM'''),
('''big_bird''', '''FlaxBigBirdForCausalLM'''),
('''electra''', '''FlaxElectraForCausalLM'''),
('''gpt-sw3''', '''FlaxGPT2LMHeadModel'''),
('''gpt2''', '''FlaxGPT2LMHeadModel'''),
('''gpt_neo''', '''FlaxGPTNeoForCausalLM'''),
('''gptj''', '''FlaxGPTJForCausalLM'''),
('''opt''', '''FlaxOPTForCausalLM'''),
('''roberta''', '''FlaxRobertaForCausalLM'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForCausalLM'''),
('''xglm''', '''FlaxXGLMForCausalLM'''),
('''xlm-roberta''', '''FlaxXLMRobertaForCausalLM'''),
]
)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Sequence Classification mapping
('''albert''', '''FlaxAlbertForSequenceClassification'''),
('''bart''', '''FlaxBartForSequenceClassification'''),
('''bert''', '''FlaxBertForSequenceClassification'''),
('''big_bird''', '''FlaxBigBirdForSequenceClassification'''),
('''distilbert''', '''FlaxDistilBertForSequenceClassification'''),
('''electra''', '''FlaxElectraForSequenceClassification'''),
('''mbart''', '''FlaxMBartForSequenceClassification'''),
('''roberta''', '''FlaxRobertaForSequenceClassification'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForSequenceClassification'''),
('''roformer''', '''FlaxRoFormerForSequenceClassification'''),
('''xlm-roberta''', '''FlaxXLMRobertaForSequenceClassification'''),
]
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict(
[
# Model for Question Answering mapping
('''albert''', '''FlaxAlbertForQuestionAnswering'''),
('''bart''', '''FlaxBartForQuestionAnswering'''),
('''bert''', '''FlaxBertForQuestionAnswering'''),
('''big_bird''', '''FlaxBigBirdForQuestionAnswering'''),
('''distilbert''', '''FlaxDistilBertForQuestionAnswering'''),
('''electra''', '''FlaxElectraForQuestionAnswering'''),
('''mbart''', '''FlaxMBartForQuestionAnswering'''),
('''roberta''', '''FlaxRobertaForQuestionAnswering'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForQuestionAnswering'''),
('''roformer''', '''FlaxRoFormerForQuestionAnswering'''),
('''xlm-roberta''', '''FlaxXLMRobertaForQuestionAnswering'''),
]
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Token Classification mapping
('''albert''', '''FlaxAlbertForTokenClassification'''),
('''bert''', '''FlaxBertForTokenClassification'''),
('''big_bird''', '''FlaxBigBirdForTokenClassification'''),
('''distilbert''', '''FlaxDistilBertForTokenClassification'''),
('''electra''', '''FlaxElectraForTokenClassification'''),
('''roberta''', '''FlaxRobertaForTokenClassification'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForTokenClassification'''),
('''roformer''', '''FlaxRoFormerForTokenClassification'''),
('''xlm-roberta''', '''FlaxXLMRobertaForTokenClassification'''),
]
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES = OrderedDict(
[
# Model for Multiple Choice mapping
('''albert''', '''FlaxAlbertForMultipleChoice'''),
('''bert''', '''FlaxBertForMultipleChoice'''),
('''big_bird''', '''FlaxBigBirdForMultipleChoice'''),
('''distilbert''', '''FlaxDistilBertForMultipleChoice'''),
('''electra''', '''FlaxElectraForMultipleChoice'''),
('''roberta''', '''FlaxRobertaForMultipleChoice'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForMultipleChoice'''),
('''roformer''', '''FlaxRoFormerForMultipleChoice'''),
('''xlm-roberta''', '''FlaxXLMRobertaForMultipleChoice'''),
]
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES = OrderedDict(
[
('''bert''', '''FlaxBertForNextSentencePrediction'''),
]
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES = OrderedDict(
[
('''speech-encoder-decoder''', '''FlaxSpeechEncoderDecoderModel'''),
('''whisper''', '''FlaxWhisperForConditionalGeneration'''),
]
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
('''whisper''', '''FlaxWhisperForAudioClassification'''),
]
)
FLAX_MODEL_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
FLAX_MODEL_FOR_PRETRAINING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
FLAX_MODEL_FOR_MASKED_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class FlaxAutoModel(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_MAPPING


FlaxAutoModel = auto_class_update(FlaxAutoModel)


class FlaxAutoModelForPreTraining(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_PRETRAINING_MAPPING


FlaxAutoModelForPreTraining = auto_class_update(FlaxAutoModelForPreTraining, head_doc="pretraining")


class FlaxAutoModelForCausalLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING


FlaxAutoModelForCausalLM = auto_class_update(FlaxAutoModelForCausalLM, head_doc="causal language modeling")


class FlaxAutoModelForMaskedLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MASKED_LM_MAPPING


FlaxAutoModelForMaskedLM = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="masked language modeling")


class FlaxAutoModelForSeq2SeqLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING


FlaxAutoModelForSeq2SeqLM = auto_class_update(
    FlaxAutoModelForSeq2SeqLM, head_doc="sequence-to-sequence language modeling", checkpoint_for_example="t5-base"
)


class FlaxAutoModelForSequenceClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING


FlaxAutoModelForSequenceClassification = auto_class_update(
    FlaxAutoModelForSequenceClassification, head_doc="sequence classification"
)


class FlaxAutoModelForQuestionAnswering(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING


FlaxAutoModelForQuestionAnswering = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="question answering")


class FlaxAutoModelForTokenClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING


FlaxAutoModelForTokenClassification = auto_class_update(
    FlaxAutoModelForTokenClassification, head_doc="token classification"
)


class FlaxAutoModelForMultipleChoice(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING


FlaxAutoModelForMultipleChoice = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="multiple choice")


class FlaxAutoModelForNextSentencePrediction(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING


FlaxAutoModelForNextSentencePrediction = auto_class_update(
    FlaxAutoModelForNextSentencePrediction, head_doc="next sentence prediction"
)


class FlaxAutoModelForImageClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING


FlaxAutoModelForImageClassification = auto_class_update(
    FlaxAutoModelForImageClassification, head_doc="image classification"
)


class FlaxAutoModelForVision2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING


FlaxAutoModelForVision2Seq = auto_class_update(FlaxAutoModelForVision2Seq, head_doc="vision-to-text modeling")


class FlaxAutoModelForSpeechSeq2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING


FlaxAutoModelForSpeechSeq2Seq = auto_class_update(
    FlaxAutoModelForSpeechSeq2Seq, head_doc="sequence-to-sequence speech-to-text modeling"
)
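# Usage sketch for the auto classes above (the checkpoint name is illustrative,
# and this assumes the Flax extras of `transformers` are installed):
#
#   model = FlaxAutoModelForSequenceClassification.from_pretrained("bert-base-cased")
#
# The _LazyAutoMapping resolves the checkpoint's config class to the class-name
# string registered above (here "FlaxBertForSequenceClassification") and only
# imports that module on first access, which keeps `import transformers` cheap.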
| 705
|
from timeit import timeit


def get_set_bits_count_using_brian_kernighans_algorithm(number: int) -> int:
    if number < 0:
        raise ValueError("the value of input must not be negative")
    result = 0
    while number:
        # clearing the least significant set bit counts one bit per iteration
        number &= number - 1
        result += 1
    return result


def get_set_bits_count_using_modulo_operator(number: int) -> int:
    if number < 0:
        raise ValueError("the value of input must not be negative")
    result = 0
    while number:
        if number % 2 == 1:
            result += 1
        number >>= 1
    return result


def benchmark() -> None:
    def do_benchmark(number: int) -> None:
        setup = "import __main__ as z"
        print(f"Benchmark when {number = }:")

        print(f"{get_set_bits_count_using_modulo_operator(number) = }")
        timing = timeit(f"z.get_set_bits_count_using_modulo_operator({number})", setup=setup)
        print(f"timeit() runs in {timing} seconds")

        print(f"{get_set_bits_count_using_brian_kernighans_algorithm(number) = }")
        timing = timeit(
            f"z.get_set_bits_count_using_brian_kernighans_algorithm({number})",
            setup=setup,
        )
        print(f"timeit() runs in {timing} seconds")

    for number in (25, 37, 58, 0):
        do_benchmark(number)
        print()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
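# Sanity check (sketch): both counting strategies agree, e.g. for 25 = 0b11001
# each function returns 3 set bits; Kernighan's loop runs once per *set* bit,
# while the modulo version runs once per bit of the input's binary length.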
| 680
| 0
|
import operator


def strand_sort(arr: list, reverse: bool = False, solution: list | None = None) -> list:
    _operator = operator.lt if reverse else operator.gt
    solution = solution or []

    if not arr:
        return solution

    # extract one monotone "strand" from the input
    sublist = [arr.pop(0)]
    for i, item in enumerate(arr):
        if _operator(item, sublist[-1]):
            sublist.append(item)
            arr.pop(i)

    # merging sublist into solution list
    if not solution:
        solution.extend(sublist)
    else:
        while sublist:
            item = sublist.pop(0)
            for i, xx in enumerate(solution):
                if not _operator(item, xx):
                    solution.insert(i, item)
                    break
            else:
                solution.append(item)

    strand_sort(arr, reverse, solution)
    return solution


if __name__ == "__main__":
    assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
    assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
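# Worked example (sketch): strand_sort([4, 3, 5, 1, 2]) first pulls the
# increasing strand [4, 5] out of the input, then strands [3] and [1, 2] on the
# recursive calls; merging the strands into the solution yields [1, 2, 3, 4, 5].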
| 588
|
'''simple docstring'''
import unittest
from transformers.models.xlm_prophetnet.tokenization_xlm_prophetnet import SPIECE_UNDERLINE, XLMProphetNetTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
class XLMProphetNetTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLMProphetNetTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    def setUp(self) -> None:
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = XLMProphetNetTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self) -> None:
        token = "[PAD]"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self) -> None:
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "[PAD]")
        self.assertEqual(vocab_keys[1], "[CLS]")
        self.assertEqual(vocab_keys[-1], "j")
        self.assertEqual(len(vocab_keys), 1012)
    def test_vocab_size(self) -> None:
        self.assertEqual(self.get_tokenizer().vocab_size, 1012)
    def test_full_tokenizer(self) -> None:
        tokenizer = XLMProphetNetTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, -9, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, -9, 4]
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""[UNK]""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""[UNK]""",
""".""",
] , )
@cached_property
    def big_tokenizer(self):
        return XLMProphetNetTokenizer.from_pretrained("microsoft/xprophetnet-large-wiki100-cased")
@slow
    def test_encode_hello_world(self) -> None:
        text = "Hello World!"
        expected_ids = [35389, 6672, 49, 2]
        self.assertListEqual(expected_ids, self.big_tokenizer.encode(text))
@slow
    def test_tokenizer_integration(self) -> None:
        # fmt: off
lowercase = {"""input_ids""": [[1_1073, 8_2783, 18, 26, 8_2783, 549, 5_1540, 248, 1_7209, 1301, 217, 20, 21_5186, 1325, 147, 1_7209, 1301, 217, 20, 5_6370, 53, 12_2020, 20, 1_6477, 27, 8_7355, 4548, 20, 4728, 7_8392, 17, 15_9969, 18, 26, 2_4491, 629, 15, 538, 2_2704, 5439, 15, 2788, 2_4491, 9885, 15, 4_3534, 605, 15, 814, 1_8403, 3_3200, 29, 15, 4_3534, 2_4458, 1_2410, 111, 2_4966, 8_3669, 9637, 14_4068, 26, 850, 2_2346, 27, 147, 2_4966, 8_3669, 8_3490, 26, 3_9113, 735, 27, 689, 656, 2800, 1339, 4600, 53, 12_2020, 11_5785, 34, 816, 1339, 4_6887, 18, 147, 5_3905, 1951, 4_2238, 4_1170, 1_7732, 834, 436, 15, 2_7523, 9_8733, 217, 147, 5542, 4981, 930, 1_7347, 16, 2], [2_0091, 629, 94, 8_2786, 58, 490, 20, 1528, 84, 5_3905, 344, 8_0592, 11_0128, 1_8822, 5267, 1306, 62, 15_2537, 308, 7997, 401, 12_4427, 549, 3_5442, 225, 109, 1_5055, 2_5748, 147, 7119, 4_3712, 34, 767, 13_5366, 18, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [592, 6_3784, 11_9466, 17, 14_7808, 8_8214, 18, 656, 81, 32, 3296, 1_0280, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=lowercase,
            model_name="microsoft/xprophetnet-large-wiki100-cased",
            revision="1acad1643ddd54a44df6a1b797ada8373685d90e",
        )
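        # Offset sketch: XLM-ProphetNet reuses the fairseq vocabulary layout, so
        # the expected ids above are raw SentencePiece ids shifted by
        # tokenizer.fairseq_offset; out-of-vocabulary pieces (the "9" and "é" in
        # the round-trip example) come back as "[UNK]" after decoding.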
| 588
| 1
|
"""simple docstring"""
import functools
def __a ( A , A ):
'''simple docstring'''
if not isinstance(lowercase_ , lowercase_ ) or not all(isinstance(lowercase_ , lowercase_ ) for day in days ):
raise ValueError("The parameter days should be a list of integers" )
if len(lowercase_ ) != 3 or not all(isinstance(lowercase_ , lowercase_ ) for cost in costs ):
raise ValueError("The parameter costs should be a list of three integers" )
if len(lowercase_ ) == 0:
return 0
if min(lowercase_ ) <= 0:
raise ValueError("All days elements should be greater than 0" )
if max(lowercase_ ) >= 3_66:
raise ValueError("All days elements should be less than 366" )
lowercase__ = set(lowercase_ )
@functools.cache
def dynamic_programming(A ) -> int:
if index > 3_65:
return 0
if index not in days_set:
return dynamic_programming(index + 1 )
return min(
costs[0] + dynamic_programming(index + 1 ) , costs[1] + dynamic_programming(index + 7 ) , costs[2] + dynamic_programming(index + 30 ) , )
return dynamic_programming(1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
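# Worked example (sketch, standard minimum-cost-tickets semantics): with
# days = [1, 4, 6, 7, 8, 20] and costs = [2, 7, 15], a 7-day pass bought on
# day 1 (7) plus 1-day passes on days 8 and 20 (2 + 2) is optimal, so
# mincost_tickets([1, 4, 6, 7, 8, 20], [2, 7, 15]) == 11.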
| 720
|
"""simple docstring"""
from collections import deque
class a__ :
def __init__( self, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ):
'''simple docstring'''
lowercase__ = process_name # process name
lowercase__ = arrival_time # arrival time of the process
# completion time of finished process or last interrupted time
lowercase__ = arrival_time
lowercase__ = burst_time # remaining burst time
lowercase__ = 0 # total time of the process wait in ready queue
lowercase__ = 0 # time from arrival time to completion time
class a__ :
def __init__( self, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, ):
'''simple docstring'''
lowercase__ = number_of_queues
# time slice of queues that round robin algorithm applied
lowercase__ = time_slices
# unfinished process is in this ready_queue
lowercase__ = queue
# current time
lowercase__ = current_time
# finished process is in this sequence queue
lowercase__ = deque()
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = []
for i in range(len(self.finish_queue ) ):
sequence.append(self.finish_queue[i].process_name )
return sequence
def snake_case__ ( self, _UpperCAmelCase ):
'''simple docstring'''
lowercase__ = []
for i in range(len(_UpperCAmelCase ) ):
waiting_times.append(queue[i].waiting_time )
return waiting_times
def snake_case__ ( self, _UpperCAmelCase ):
'''simple docstring'''
lowercase__ = []
for i in range(len(_UpperCAmelCase ) ):
turnaround_times.append(queue[i].turnaround_time )
return turnaround_times
def snake_case__ ( self, _UpperCAmelCase ):
'''simple docstring'''
lowercase__ = []
for i in range(len(_UpperCAmelCase ) ):
completion_times.append(queue[i].stop_time )
return completion_times
def snake_case__ ( self, _UpperCAmelCase ):
'''simple docstring'''
return [q.burst_time for q in queue]
def snake_case__ ( self, _UpperCAmelCase ):
'''simple docstring'''
process.waiting_time += self.current_time - process.stop_time
return process.waiting_time
def snake_case__ ( self, _UpperCAmelCase ):
'''simple docstring'''
lowercase__ = deque() # sequence deque of finished process
while len(_UpperCAmelCase ) != 0:
lowercase__ = ready_queue.popleft() # current process
# if process's arrival time is later than current time, update current time
if self.current_time < cp.arrival_time:
self.current_time += cp.arrival_time
# update waiting time of current process
self.update_waiting_time(_UpperCAmelCase )
# update current time
self.current_time += cp.burst_time
# finish the process and set the process's burst-time 0
lowercase__ = 0
# set the process's turnaround time because it is finished
lowercase__ = self.current_time - cp.arrival_time
# set the completion time
lowercase__ = self.current_time
# add the process to queue that has finished queue
finished.append(_UpperCAmelCase )
self.finish_queue.extend(_UpperCAmelCase ) # add finished process to finish queue
# FCFS will finish all remaining processes
return finished
def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase ):
'''simple docstring'''
lowercase__ = deque() # sequence deque of terminated process
# just for 1 cycle and unfinished processes will go back to queue
for _ in range(len(_UpperCAmelCase ) ):
lowercase__ = ready_queue.popleft() # current process
# if process's arrival time is later than current time, update current time
if self.current_time < cp.arrival_time:
self.current_time += cp.arrival_time
# update waiting time of unfinished processes
self.update_waiting_time(_UpperCAmelCase )
# if the burst time of process is bigger than time-slice
if cp.burst_time > time_slice:
# use CPU for only time-slice
self.current_time += time_slice
# update remaining burst time
cp.burst_time -= time_slice
# update end point time
lowercase__ = self.current_time
# locate the process behind the queue because it is not finished
ready_queue.append(_UpperCAmelCase )
else:
# use CPU for remaining burst time
self.current_time += cp.burst_time
# set burst time 0 because the process is finished
lowercase__ = 0
# set the finish time
lowercase__ = self.current_time
# update the process' turnaround time because it is finished
lowercase__ = self.current_time - cp.arrival_time
# add the process to queue that has finished queue
finished.append(_UpperCAmelCase )
self.finish_queue.extend(_UpperCAmelCase ) # add finished process to finish queue
# return finished processes queue and remaining processes queue
return finished, ready_queue
def snake_case__ ( self ):
'''simple docstring'''
for i in range(self.number_of_queues - 1 ):
lowercase__ , lowercase__ = self.round_robin(
self.ready_queue, self.time_slices[i] )
# the last queue has first_come_first_served algorithm
self.first_come_first_served(self.ready_queue )
return self.finish_queue
if __name__ == "__main__":
import doctest
lowerCAmelCase_: Optional[int] = Process("P1", 0, 5_3)
lowerCAmelCase_: Union[str, Any] = Process("P2", 0, 1_7)
lowerCAmelCase_: str = Process("P3", 0, 6_8)
lowerCAmelCase_: int = Process("P4", 0, 2_4)
lowerCAmelCase_: Dict = 3
lowerCAmelCase_: Any = [1_7, 2_5]
lowerCAmelCase_: Tuple = deque([Pa, Pa, Pa, Pa])
if len(time_slices) != number_of_queues - 1:
raise SystemExit(0)
doctest.testmod(extraglobs={"queue": deque([Pa, Pa, Pa, Pa])})
lowerCAmelCase_: Any = Process("P1", 0, 5_3)
lowerCAmelCase_: Tuple = Process("P2", 0, 1_7)
lowerCAmelCase_: Optional[int] = Process("P3", 0, 6_8)
lowerCAmelCase_: List[Any] = Process("P4", 0, 2_4)
lowerCAmelCase_: Union[str, Any] = 3
lowerCAmelCase_: Any = [1_7, 2_5]
lowerCAmelCase_: Optional[Any] = deque([Pa, Pa, Pa, Pa])
lowerCAmelCase_: Union[str, Any] = MLFQ(number_of_queues, time_slices, queue, 0)
lowerCAmelCase_: Tuple = mlfq.multi_level_feedback_queue()
# print total waiting times of processes(P1, P2, P3, P4)
print(
F'waiting time:\
\t\t\t{MLFQ.calculate_waiting_time(mlfq, [Pa, Pa, Pa, Pa])}'
)
# print completion times of processes(P1, P2, P3, P4)
print(
F'completion time:\
\t\t{MLFQ.calculate_completion_time(mlfq, [Pa, Pa, Pa, Pa])}'
)
# print total turnaround times of processes(P1, P2, P3, P4)
print(
F'turnaround time:\
\t\t{MLFQ.calculate_turnaround_time(mlfq, [Pa, Pa, Pa, Pa])}'
)
# print sequence of finished processes
print(
F'sequence of finished processes:\
{mlfq.calculate_sequence_of_finish_queue()}'
)
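# Behaviour sketch: with time_slices = [17, 25], every process first receives up
# to 17 time units in queue 0; anything unfinished gets up to 25 more units in
# queue 1; the remainder runs to completion under FCFS in the last queue. Short
# jobs such as P2 (burst 17) therefore finish in the first round-robin pass.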
| 668
| 0
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "hustvl/yolos-small": "https://huggingface.co/hustvl/yolos-small/resolve/main/config.json",
    # See all YOLOS models at https://huggingface.co/models?filter=yolos
}


class YolosConfig(PretrainedConfig):
    model_type = "yolos"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=[512, 864],
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        num_detection_tokens=100,
        use_mid_position_embeddings=True,
        auxiliary_loss=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.num_detection_tokens = num_detection_tokens
        self.use_mid_position_embeddings = use_mid_position_embeddings
        self.auxiliary_loss = auxiliary_loss
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient


class YolosOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        return 12
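# Usage sketch (the keyword values shown are just the defaults made explicit):
#   config = YolosConfig(image_size=[512, 864], num_detection_tokens=100)
#   onnx_config = YolosOnnxConfig(config)
#   list(onnx_config.inputs)  # -> ["pixel_values"], with dynamic batch/size axes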
| 112
|
def neville_interpolate(x_points: list, y_points: list, x0: float) -> list:
    n = len(x_points)
    q = [[0] * n for i in range(n)]
    for i in range(n):
        q[i][1] = y_points[i]

    for i in range(2, n):
        for j in range(i, n):
            q[j][i] = (
                (x0 - x_points[j - i + 1]) * q[j][i - 1]
                - (x0 - x_points[j]) * q[j - 1][i - 1]
            ) / (x_points[j] - x_points[j - i + 1])

    return [q[n - 1][n - 1], q]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
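# Worked example (sketch): for the line y = 2x any interpolation is exact, so
# neville_interpolate([1, 2, 3], [2, 4, 6], 2.5)[0] == 5.0; the second returned
# value is the Neville table q used to build the answer.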
| 307
| 0
|
from math import factorial

DIGIT_FACTORIAL: dict[str, int] = {str(digit): factorial(digit) for digit in range(10)}


def digit_factorial_sum(number: int) -> int:
    if not isinstance(number, int):
        raise TypeError("Parameter number must be int")

    if number < 0:
        raise ValueError("Parameter number must be greater than or equal to 0")

    # Converts number in string to iterate on its digits and adds its factorial.
    return sum(DIGIT_FACTORIAL[digit] for digit in str(number))


def solution(chain_length: int = 60, number_limit: int = 1_000_000) -> int:
    if not isinstance(chain_length, int) or not isinstance(number_limit, int):
        raise TypeError("Parameters chain_length and number_limit must be int")

    if chain_length <= 0 or number_limit <= 0:
        raise ValueError("Parameters chain_length and number_limit must be greater than 0")

    # the counter for the chains with the exact desired length
    chains_counter = 0
    # the cached sizes of the previous chains
    chain_sets_lengths: dict[int, int] = {}

    for start_chain_element in range(1, number_limit):
        # The temporary set will contain the elements of the chain
        chain_set = set()
        chain_set_length = 0

        # Stop computing the chain when you find a cached size, a repeating item
        # or the length is greater than the desired one.
        chain_element = start_chain_element
        while (
            chain_element not in chain_sets_lengths
            and chain_element not in chain_set
            and chain_set_length <= chain_length
        ):
            chain_set.add(chain_element)
            chain_set_length += 1
            chain_element = digit_factorial_sum(chain_element)

        if chain_element in chain_sets_lengths:
            chain_set_length += chain_sets_lengths[chain_element]

        chain_sets_lengths[start_chain_element] = chain_set_length

        # If chain contains the exact amount of elements increase the counter
        if chain_set_length == chain_length:
            chains_counter += 1

    return chains_counter


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(f"{solution()}")
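# Worked example (sketch): 145 maps to itself (1! + 4! + 5! = 145), a chain of
# length 1, while 169 -> 363601 -> 1454 -> 169 is the well-known length-3 loop
# that digit_factorial_sum cycles through.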
| 700
|
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seq2seq_trainer import Seq2SeqTrainer
from seq2seq_training_args import Seq2SeqTrainingArguments

import transformers
from transformers import (
    AutoConfig,
    AutoModelForSeq2SeqLM,
    AutoTokenizer,
    HfArgumentParser,
    MBartTokenizer,
    MBartTokenizerFast,
    set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
    Seq2SeqDataCollator,
    Seq2SeqDataset,
    assert_all_frozen,
    build_compute_metrics_fn,
    check_output_dir,
    freeze_embeds,
    freeze_params,
    lmap,
    save_json,
    use_task_specific_params,
    write_txt_file,
)
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    freeze_encoder: bool = field(default=False, metadata={"help": "Whether to freeze the encoder."})
    freeze_embeds: bool = field(default=False, metadata={"help": "Whether to freeze the embeddings."})
@dataclass
class DataTrainingArguments:
    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."}
    )
    task: Optional[str] = field(
        default="summarization",
        metadata={"help": "Task name, summarization (or summarization_{dataset} for pegasus) or translation"},
    )
    max_source_length: Optional[int] = field(
        default=1024,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    max_target_length: Optional[int] = field(
        default=128,
        metadata={
            "help": (
                "The maximum total sequence length for target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    val_max_target_length: Optional[int] = field(
        default=142,
        metadata={
            "help": (
                "The maximum total sequence length for validation target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded. "
                "This argument is also used to override the ``max_length`` param of ``model.generate``, which is used "
                "during ``evaluate`` and ``predict``."
            )
        },
    )
    test_max_target_length: Optional[int] = field(
        default=142,
        metadata={
            "help": (
                "The maximum total sequence length for test target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    n_train: Optional[int] = field(default=-1, metadata={"help": "# training examples. -1 means use all."})
    n_val: Optional[int] = field(default=-1, metadata={"help": "# validation examples. -1 means use all."})
    n_test: Optional[int] = field(default=-1, metadata={"help": "# test examples. -1 means use all."})
    src_lang: Optional[str] = field(default=None, metadata={"help": "Source language id for translation."})
    tgt_lang: Optional[str] = field(default=None, metadata={"help": "Target language id for translation."})
    eval_beams: Optional[int] = field(default=None, metadata={"help": "# num_beams to use for evaluation."})
    ignore_pad_token_for_loss: bool = field(
        default=True,
        metadata={"help": "If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined."},
    )
def handle_metrics(split, metrics, output_dir):
    """Log metrics for the given split and save them as json in output_dir."""
    logger.info(f"***** {split} metrics *****")
    for key in sorted(metrics.keys()):
        logger.info(f"  {key} = {metrics[key]}")
    save_json(metrics, os.path.join(output_dir, f"{split}_results.json"))


def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.

    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, Seq2SeqTrainingArguments))

    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    check_output_dir(training_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED),
        training_args.fp16,
    )
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed
    set_seed(training_args.seed)

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )

    extra_model_params = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout")
    for p in extra_model_params:
        if getattr(training_args, p, None):
            assert hasattr(config, p), f"({config.__class__.__name__}) doesn't have a `{p}` attribute"
            setattr(config, p, getattr(training_args, p))

    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )
    model = AutoModelForSeq2SeqLM.from_pretrained(
        model_args.model_name_or_path,
        from_tf=".ckpt" in model_args.model_name_or_path,
        config=config,
        cache_dir=model_args.cache_dir,
    )

    # use task specific params
    use_task_specific_params(model, data_args.task)

    # set num_beams for evaluation
    if data_args.eval_beams is None:
        data_args.eval_beams = model.config.num_beams

    # set decoder_start_token_id for MBart
    if model.config.decoder_start_token_id is None and isinstance(tokenizer, (MBartTokenizer, MBartTokenizerFast)):
        assert (
            data_args.tgt_lang is not None and data_args.src_lang is not None
        ), "mBart requires --tgt_lang and --src_lang"
        if isinstance(tokenizer, MBartTokenizer):
            model.config.decoder_start_token_id = tokenizer.lang_code_to_id[data_args.tgt_lang]
        else:
            model.config.decoder_start_token_id = tokenizer.convert_tokens_to_ids(data_args.tgt_lang)

    if model_args.freeze_embeds:
        freeze_embeds(model)
    if model_args.freeze_encoder:
        freeze_params(model.get_encoder())
        assert_all_frozen(model.get_encoder())

    dataset_class = Seq2SeqDataset

    # Get datasets
    train_dataset = (
        dataset_class(
            tokenizer,
            type_path="train",
            data_dir=data_args.data_dir,
            n_obs=data_args.n_train,
            max_target_length=data_args.max_target_length,
            max_source_length=data_args.max_source_length,
            prefix=model.config.prefix or "",
        )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        dataset_class(
            tokenizer,
            type_path="val",
            data_dir=data_args.data_dir,
            n_obs=data_args.n_val,
            max_target_length=data_args.val_max_target_length,
            max_source_length=data_args.max_source_length,
            prefix=model.config.prefix or "",
        )
        if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
        else None
    )
    test_dataset = (
        dataset_class(
            tokenizer,
            type_path="test",
            data_dir=data_args.data_dir,
            n_obs=data_args.n_test,
            max_target_length=data_args.test_max_target_length,
            max_source_length=data_args.max_source_length,
            prefix=model.config.prefix or "",
        )
        if training_args.do_predict
        else None
    )

    # Initialize our Trainer
    compute_metrics_fn = (
        build_compute_metrics_fn(data_args.task, tokenizer) if training_args.predict_with_generate else None
    )
    trainer = Seq2SeqTrainer(
        model=model,
        args=training_args,
        data_args=data_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        data_collator=Seq2SeqDataCollator(
            tokenizer, data_args, model.config.decoder_start_token_id, training_args.tpu_num_cores
        ),
        compute_metrics=compute_metrics_fn,
        tokenizer=tokenizer,
    )

    all_metrics = {}
    # Training
    if training_args.do_train:
        logger.info("*** Train ***")

        train_result = trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None
        )
        metrics = train_result.metrics
        metrics["train_n_objs"] = data_args.n_train

        trainer.save_model()  # this also saves the tokenizer

        if trainer.is_world_process_zero():
            handle_metrics("train", metrics, training_args.output_dir)
            all_metrics.update(metrics)

            # Need to save the state, since Trainer.save_model saves only the tokenizer with the model
            trainer.state.save_to_json(os.path.join(training_args.output_dir, "trainer_state.json"))

            # For convenience, we also re-save the tokenizer to the same directory,
            # so that you can share your model easily on huggingface.co/models =)
            tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        metrics = trainer.evaluate(metric_key_prefix="val")
        metrics["val_n_objs"] = data_args.n_val
        metrics["val_loss"] = round(metrics["val_loss"], 4)

        if trainer.is_world_process_zero():
            handle_metrics("val", metrics, training_args.output_dir)
            all_metrics.update(metrics)

    if training_args.do_predict:
        logger.info("*** Predict ***")

        test_output = trainer.predict(test_dataset=test_dataset, metric_key_prefix="test")
        metrics = test_output.metrics
        metrics["test_n_objs"] = data_args.n_test

        if trainer.is_world_process_zero():
            metrics["test_loss"] = round(metrics["test_loss"], 4)
            handle_metrics("test", metrics, training_args.output_dir)
            all_metrics.update(metrics)

            if training_args.predict_with_generate:
                test_preds = tokenizer.batch_decode(
                    test_output.predictions, skip_special_tokens=True, clean_up_tokenization_spaces=True
                )
                test_preds = lmap(str.strip, test_preds)
                write_txt_file(test_preds, os.path.join(training_args.output_dir, "test_generations.txt"))

    if trainer.is_world_process_zero():
        save_json(all_metrics, os.path.join(training_args.output_dir, "all_results.json"))

    return all_metrics


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
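# Invocation sketch (the flag names come from the dataclasses above; the
# checkpoint and data paths are illustrative):
#   python finetune_trainer.py \
#       --model_name_or_path t5-small --data_dir ./wmt_en_ro --output_dir ./out \
#       --do_train --do_eval --task translation --predict_with_generate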
| 653
| 0
|
# limitations under the License.
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that `from diffusers.pipelines import DiffusionPipeline` temporarily keeps working
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import deprecate
deprecate(
'''pipelines_utils''',
'''0.22.0''',
'''Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.''',
standard_warn=False,
stacklevel=3,
)
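# Effect (sketch): importing from the old location still works, e.g.
#   from diffusers.pipeline_utils import DiffusionPipeline
# emits the deprecation message above at import time and then re-exports the
# class from diffusers.pipelines.pipeline_utils.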
| 367
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'''configuration_owlvit''': [
'''OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''OwlViTConfig''',
'''OwlViTOnnxConfig''',
'''OwlViTTextConfig''',
'''OwlViTVisionConfig''',
],
'''processing_owlvit''': ['''OwlViTProcessor'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_owlvit"] = ["OwlViTFeatureExtractor"]
    _import_structure["image_processing_owlvit"] = ["OwlViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_owlvit"] = [
'''OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''OwlViTModel''',
'''OwlViTPreTrainedModel''',
'''OwlViTTextModel''',
'''OwlViTVisionModel''',
'''OwlViTForObjectDetection''',
]
if TYPE_CHECKING:
from .configuration_owlvit import (
OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OwlViTConfig,
OwlViTOnnxConfig,
OwlViTTextConfig,
OwlViTVisionConfig,
)
from .processing_owlvit import OwlViTProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_owlvit import OwlViTFeatureExtractor
from .image_processing_owlvit import OwlViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_owlvit import (
OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
OwlViTForObjectDetection,
OwlViTModel,
OwlViTPreTrainedModel,
OwlViTTextModel,
OwlViTVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 367
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_timesformer""": ["""TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """TimesformerConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_timesformer"] = [
"""TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TimesformerModel""",
"""TimesformerForVideoClassification""",
"""TimesformerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timesformer import (
TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimesformerForVideoClassification,
TimesformerModel,
TimesformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
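# Lazy-import sketch: until TYPE_CHECKING or first attribute access, only the
# strings in _import_structure are known, e.g.
#   from transformers.models.timesformer import TimesformerModel
# triggers the torch-backed module import at that point, not at package import.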
| 342
|
"""simple docstring"""
import math
from typing import Optional
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/encodec_24khz": "https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json",
    "facebook/encodec_48khz": "https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json",
}


class EncodecConfig(PretrainedConfig):
    model_type = "encodec"

    def __init__(
        self,
        target_bandwidths=[1.5, 3.0, 6.0, 12.0, 24.0],
        sampling_rate=24_000,
        audio_channels=1,
        normalize=False,
        chunk_length_s=None,
        overlap=None,
        hidden_size=128,
        num_filters=32,
        num_residual_layers=1,
        upsampling_ratios=[8, 5, 4, 2],
        norm_type="weight_norm",
        kernel_size=7,
        last_kernel_size=7,
        residual_kernel_size=3,
        dilation_growth_rate=2,
        use_causal_conv=True,
        pad_mode="reflect",
        compress=2,
        num_lstm_layers=2,
        trim_right_ratio=1.0,
        codebook_size=1024,
        codebook_dim=None,
        use_conv_shortcut=True,
        **kwargs,
    ) -> None:
        self.target_bandwidths = target_bandwidths
        self.sampling_rate = sampling_rate
        self.audio_channels = audio_channels
        self.normalize = normalize
        self.chunk_length_s = chunk_length_s
        self.overlap = overlap
        self.hidden_size = hidden_size
        self.num_filters = num_filters
        self.num_residual_layers = num_residual_layers
        self.upsampling_ratios = upsampling_ratios
        self.norm_type = norm_type
        self.kernel_size = kernel_size
        self.last_kernel_size = last_kernel_size
        self.residual_kernel_size = residual_kernel_size
        self.dilation_growth_rate = dilation_growth_rate
        self.use_causal_conv = use_causal_conv
        self.pad_mode = pad_mode
        self.compress = compress
        self.num_lstm_layers = num_lstm_layers
        self.trim_right_ratio = trim_right_ratio
        self.codebook_size = codebook_size
        self.codebook_dim = codebook_dim if codebook_dim is not None else hidden_size
        self.use_conv_shortcut = use_conv_shortcut

        if self.norm_type not in ["weight_norm", "time_group_norm"]:
            raise ValueError(
                f'self.norm_type must be one of `"weight_norm"`, `"time_group_norm"`), got {self.norm_type}'
            )

        super().__init__(**kwargs)

    @property
    def chunk_length(self) -> Optional[int]:
        if self.chunk_length_s is None:
            return None
        else:
            return int(self.chunk_length_s * self.sampling_rate)

    @property
    def chunk_stride(self) -> Optional[int]:
        if self.chunk_length_s is None or self.overlap is None:
            return None
        else:
            return max(1, int((1.0 - self.overlap) * self.chunk_length))

    @property
    def frame_rate(self) -> int:
        hop_length = np.prod(self.upsampling_ratios)
        return math.ceil(self.sampling_rate / hop_length)

    @property
    def num_quantizers(self) -> int:
        return int(1000 * self.target_bandwidths[-1] // (self.frame_rate * 10))
| 1
|
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
logger = logging.getLogger()


@unittest.skip("Temporarily disable the doc tests.")
@require_torch
@require_tf
@slow
class TestCodeExamples(unittest.TestCase):
    def analyze_directory(
        self,
        directory: Path,
        identifier: Union[str, None] = None,
        ignore_files: Union[List[str], None] = None,
        n_identifier: Union[str, List[str], None] = None,
        only_modules: bool = True,
    ) -> None:
        files = [file for file in os.listdir(directory) if os.path.isfile(os.path.join(directory, file))]

        if identifier is not None:
            files = [file for file in files if identifier in file]

        if n_identifier is not None:
            if isinstance(n_identifier, list):
                for n_ in n_identifier:
                    files = [file for file in files if n_ not in file]
            else:
                files = [file for file in files if n_identifier not in file]

        ignore_files = ignore_files or []
        ignore_files.append("__init__.py")
        files = [file for file in files if file not in ignore_files]

        for file in files:
            # Open all files
            print("Testing", file)

            if only_modules:
                module_identifier = file.split(".")[0]
                try:
                    module = getattr(transformers, module_identifier)
                    suite = doctest.DocTestSuite(module)
                    result = unittest.TextTestRunner().run(suite)
                    self.assertIs(len(result.failures), 0)
                except AttributeError:
                    logger.info(f"{module_identifier} is not a module.")
            else:
                result = doctest.testfile(str(Path("..") / directory / file), optionflags=doctest.ELLIPSIS)
                self.assertIs(result.failed, 0)

    def test_modeling_examples(self) -> None:
        transformers_directory = Path("src/transformers")
        identifier = "modeling"
        ignore_files = [
            "modeling_ctrl.py",
            "modeling_tf_ctrl.py",
        ]
        self.analyze_directory(transformers_directory, identifier=identifier, ignore_files=ignore_files)

    def test_tokenization_examples(self) -> None:
        transformers_directory = Path("src/transformers")
        identifier = "tokenization"
        self.analyze_directory(transformers_directory, identifier=identifier)

    def test_configuration_examples(self) -> None:
        transformers_directory = Path("src/transformers")
        identifier = "configuration"
        self.analyze_directory(transformers_directory, identifier=identifier)

    def test_remaining_examples(self) -> None:
        transformers_directory = Path("src/transformers")
        n_identifiers = ["configuration", "modeling", "tokenization"]
        self.analyze_directory(transformers_directory, n_identifier=n_identifiers)

    def test_doc_sources(self) -> None:
        doc_source_directory = Path("docs/source")
        ignore_files = ["favicon.ico"]
        self.analyze_directory(doc_source_directory, ignore_files=ignore_files, only_modules=False)
| 429
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"funnel-transformer/small": "https://huggingface.co/funnel-transformer/small/resolve/main/config.json",
"funnel-transformer/small-base": "https://huggingface.co/funnel-transformer/small-base/resolve/main/config.json",
"funnel-transformer/medium": "https://huggingface.co/funnel-transformer/medium/resolve/main/config.json",
"funnel-transformer/medium-base": "https://huggingface.co/funnel-transformer/medium-base/resolve/main/config.json",
"funnel-transformer/intermediate": (
"https://huggingface.co/funnel-transformer/intermediate/resolve/main/config.json"
),
"funnel-transformer/intermediate-base": (
"https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/config.json"
),
"funnel-transformer/large": "https://huggingface.co/funnel-transformer/large/resolve/main/config.json",
"funnel-transformer/large-base": "https://huggingface.co/funnel-transformer/large-base/resolve/main/config.json",
"funnel-transformer/xlarge": "https://huggingface.co/funnel-transformer/xlarge/resolve/main/config.json",
"funnel-transformer/xlarge-base": "https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/config.json",
}
class FunnelConfig(PretrainedConfig):
    model_type = "funnel"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
    }

    def __init__(
        self,
        vocab_size=30_522,
        block_sizes=[4, 4, 4],
        block_repeats=None,
        num_decoder_layers=2,
        d_model=768,
        n_head=12,
        d_head=64,
        d_inner=3072,
        hidden_act="gelu_new",
        hidden_dropout=0.1,
        attention_dropout=0.1,
        activation_dropout=0.0,
        initializer_range=0.1,
        initializer_std=None,
        layer_norm_eps=1e-9,
        pooling_type="mean",
        attention_type="relative_shift",
        separate_cls=True,
        truncate_seq=True,
        pool_q_only=True,
        **kwargs,
    ) -> None:
        self.vocab_size = vocab_size
        self.block_sizes = block_sizes
        self.block_repeats = [1] * len(block_sizes) if block_repeats is None else block_repeats
        assert len(block_sizes) == len(
            self.block_repeats
        ), "`block_sizes` and `block_repeats` should have the same length."
        self.num_decoder_layers = num_decoder_layers
        self.d_model = d_model
        self.n_head = n_head
        self.d_head = d_head
        self.d_inner = d_inner
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.initializer_range = initializer_range
        self.initializer_std = initializer_std
        self.layer_norm_eps = layer_norm_eps
        assert pooling_type in [
            "mean",
            "max",
        ], f"Got {pooling_type} for `pooling_type` but only 'mean' and 'max' are supported."
        self.pooling_type = pooling_type
        assert attention_type in [
            "relative_shift",
            "factorized",
        ], f"Got {attention_type} for `attention_type` but only 'relative_shift' and 'factorized' are supported."
        self.attention_type = attention_type
        self.separate_cls = separate_cls
        self.truncate_seq = truncate_seq
        self.pool_q_only = pool_q_only

        super().__init__(**kwargs)

    @property
    def num_hidden_layers(self) -> int:
        return sum(self.block_sizes)

    @num_hidden_layers.setter
    def num_hidden_layers(self, value) -> None:
        raise NotImplementedError(
            "This model does not support the setting of `num_hidden_layers`. Please set `block_sizes`."
        )

    @property
    def num_blocks(self) -> int:
        return len(self.block_sizes)

    @num_blocks.setter
    def num_blocks(self, value) -> None:
        raise NotImplementedError("This model does not support the setting of `num_blocks`. Please set `block_sizes`.")
| 1
|
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor
logger = logging.get_logger(__name__)


class SegformerFeatureExtractor(SegformerImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use SegformerImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 150
|
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase = logging.get_logger(__name__)
lowercase = {
"""BAAI/AltCLIP""": """https://huggingface.co/BAAI/AltCLIP/resolve/main/config.json""",
# See all AltCLIP models at https://huggingface.co/models?filter=altclip
}
class SCREAMING_SNAKE_CASE_ ( _lowercase):
'''simple docstring'''
__magic_name__ : str = '''altclip_text_model'''
def __init__( self , lowerCamelCase__=250_002 , lowerCamelCase__=1_024 , lowerCamelCase__=24 , lowerCamelCase__=16 , lowerCamelCase__=4_096 , lowerCamelCase__="gelu" , lowerCamelCase__=0.1 , lowerCamelCase__=0.1 , lowerCamelCase__=514 , lowerCamelCase__=1 , lowerCamelCase__=0.02 , lowerCamelCase__=0.02 , lowerCamelCase__=1E-05 , lowerCamelCase__=1 , lowerCamelCase__=0 , lowerCamelCase__=2 , lowerCamelCase__="absolute" , lowerCamelCase__=True , lowerCamelCase__=768 , **lowerCamelCase__ , ) -> Optional[int]:
'''simple docstring'''
super().__init__(pad_token_id=lowerCamelCase__ , bos_token_id=lowerCamelCase__ , eos_token_id=lowerCamelCase__ , **lowerCamelCase__)
snake_case__ : str = vocab_size
snake_case__ : int = hidden_size
snake_case__ : Dict = num_hidden_layers
snake_case__ : Any = num_attention_heads
snake_case__ : List[str] = hidden_act
snake_case__ : List[Any] = intermediate_size
snake_case__ : Tuple = hidden_dropout_prob
snake_case__ : Union[str, Any] = attention_probs_dropout_prob
snake_case__ : Tuple = max_position_embeddings
snake_case__ : Any = type_vocab_size
snake_case__ : int = initializer_range
snake_case__ : Dict = initializer_factor
snake_case__ : Optional[Any] = layer_norm_eps
snake_case__ : List[str] = position_embedding_type
snake_case__ : Union[str, Any] = use_cache
snake_case__ : Dict = project_dim
class SCREAMING_SNAKE_CASE_ ( _lowercase):
'''simple docstring'''
__magic_name__ : int = '''altclip_vision_model'''
def __init__( self , lowerCamelCase__=768 , lowerCamelCase__=3_072 , lowerCamelCase__=512 , lowerCamelCase__=12 , lowerCamelCase__=12 , lowerCamelCase__=3 , lowerCamelCase__=224 , lowerCamelCase__=32 , lowerCamelCase__="quick_gelu" , lowerCamelCase__=1E-5 , lowerCamelCase__=0.0 , lowerCamelCase__=0.02 , lowerCamelCase__=1.0 , **lowerCamelCase__ , ) -> Optional[Any]:
'''simple docstring'''
super().__init__(**lowerCamelCase__)
snake_case__ : str = hidden_size
snake_case__ : List[Any] = intermediate_size
snake_case__ : Union[str, Any] = projection_dim
snake_case__ : Union[str, Any] = num_hidden_layers
snake_case__ : List[Any] = num_attention_heads
snake_case__ : Optional[Any] = num_channels
snake_case__ : Tuple = patch_size
snake_case__ : List[Any] = image_size
snake_case__ : Optional[Any] = initializer_range
snake_case__ : Union[str, Any] = initializer_factor
snake_case__ : int = attention_dropout
snake_case__ : List[str] = layer_norm_eps
snake_case__ : List[str] = hidden_act
@classmethod
def UpperCAmelCase ( cls , lowerCamelCase__ , **lowerCamelCase__) -> "PretrainedConfig":
'''simple docstring'''
cls._set_token_in_kwargs(lowerCamelCase__)
snake_case__, snake_case__ : str = cls.get_config_dict(lowerCamelCase__ , **lowerCamelCase__)
# get the vision config dict if we are loading from AltCLIPConfig
if config_dict.get("model_type") == "altclip":
snake_case__ : List[Any] = config_dict["vision_config"]
if "model_type" in config_dict and hasattr(cls , "model_type") and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""")
return cls.from_dict(lowerCamelCase__ , **lowerCamelCase__)
class SCREAMING_SNAKE_CASE_ ( _lowercase):
'''simple docstring'''
__magic_name__ : Dict = '''altclip'''
__magic_name__ : Union[str, Any] = True
def __init__( self , lowerCamelCase__=None , lowerCamelCase__=None , lowerCamelCase__=768 , lowerCamelCase__=2.65_92 , **lowerCamelCase__) -> Any:
'''simple docstring'''
snake_case__ : List[Any] = kwargs.pop("text_config_dict" , lowerCamelCase__)
snake_case__ : str = kwargs.pop("vision_config_dict" , lowerCamelCase__)
super().__init__(**lowerCamelCase__)
# Instead of simply assigning `[text|vision]_config_dict` to `[text|vision]_config`, we use the values in
# `[text|vision]_config_dict` to update the values in `[text|vision]_config`. The values should be same in most
# cases, but we don't want to break anything regarding `_config_dict` that existed before commit `8827e1b2`.
if text_config_dict is not None:
if text_config is None:
snake_case__ : str = {}
# This is the complete result when using `text_config_dict`.
snake_case__ : str = AltCLIPTextConfig(**lowerCamelCase__).to_dict()
# Give a warning if the values exist in both `_text_config_dict` and `text_config` but being different.
for key, value in _text_config_dict.items():
if key in text_config and value != text_config[key] and key not in ["transformers_version"]:
# If specified in `text_config_dict`
if key in text_config_dict:
snake_case__ : List[Any] = (
f"""`{key}` is found in both `text_config_dict` and `text_config` but with different values. """
f"""The value `text_config_dict[\"{key}\"]` will be used instead."""
)
# If inferred from default argument values (just to be super careful)
else:
snake_case__ : Dict = (
f"""`text_config_dict` is provided which will be used to initialize `AltCLIPTextConfig`. The """
f"""value `text_config[\"{key}\"]` will be overriden."""
)
logger.warning(lowerCamelCase__)
# Update all values in `text_config` with the ones in `_text_config_dict`.
text_config.update(_text_config_dict)
if vision_config_dict is not None:
if vision_config is None:
snake_case__ : List[Any] = {}
# This is the complete result when using `vision_config_dict`.
snake_case__ : Union[str, Any] = AltCLIPVisionConfig(**lowerCamelCase__).to_dict()
# convert keys to string instead of integer
if "id2label" in _vision_config_dict:
snake_case__ : List[Any] = {
str(lowerCamelCase__): value for key, value in _vision_config_dict["id2label"].items()
}
# Give a warning if the values exist in both `_vision_config_dict` and `vision_config` but being different.
for key, value in _vision_config_dict.items():
if key in vision_config and value != vision_config[key] and key not in ["transformers_version"]:
# If specified in `vision_config_dict`
if key in vision_config_dict:
snake_case__ : Any = (
f"""`{key}` is found in both `vision_config_dict` and `vision_config` but with different """
f"""values. The value `vision_config_dict[\"{key}\"]` will be used instead."""
)
# If inferred from default argument values (just to be super careful)
else:
snake_case__ : int = (
f"""`vision_config_dict` is provided which will be used to initialize `AltCLIPVisionConfig`. """
f"""The value `vision_config[\"{key}\"]` will be overriden."""
)
logger.warning(lowerCamelCase__)
# Update all values in `vision_config` with the ones in `_vision_config_dict`.
vision_config.update(_vision_config_dict)
if text_config is None:
snake_case__ : Tuple = {}
logger.info("`text_config` is `None`. Initializing the `AltCLIPTextConfig` with default values.")
if vision_config is None:
snake_case__ : List[Any] = {}
            logger.info("`vision_config` is `None`. Initializing the `AltCLIPVisionConfig` with default values.")
snake_case__ : Any = AltCLIPTextConfig(**lowerCamelCase__)
snake_case__ : Dict = AltCLIPVisionConfig(**lowerCamelCase__)
snake_case__ : List[str] = projection_dim
snake_case__ : Tuple = logit_scale_init_value
snake_case__ : List[Any] = 1.0
@classmethod
def UpperCAmelCase ( cls , lowerCamelCase__ , lowerCamelCase__ , **lowerCamelCase__) -> Optional[Any]:
'''simple docstring'''
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **lowerCamelCase__)
def UpperCAmelCase ( self) -> int:
'''simple docstring'''
snake_case__ : Any = copy.deepcopy(self.__dict__)
snake_case__ : Optional[Any] = self.text_config.to_dict()
snake_case__ : List[str] = self.vision_config.to_dict()
snake_case__ : str = self.__class__.model_type
return output
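# Editor's note (hedged, inferred from the code above): to_dict() serializes the
# nested text/vision sub-configs back to plain dicts and stamps the composite
# model_type, so the config round-trips through from_dict()/from_pretrained().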
| 150
| 1
|
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import subprocess
from packaging.version import Version, parse
from accelerate.commands.config.config_args import default_config_file, load_config_from_file
lowerCAmelCase__ : str ='''Run commands across TPU VMs for initial setup before running `accelerate launch`.'''
def __lowercase ( a__=None ) -> Tuple:
if subparsers is not None:
__SCREAMING_SNAKE_CASE = subparsers.add_parser('tpu-config' , description=_description )
else:
__SCREAMING_SNAKE_CASE = argparse.ArgumentParser('Accelerate tpu-config command' , description=_description )
# Core arguments
__SCREAMING_SNAKE_CASE = parser.add_argument_group(
'Config Arguments' , 'Arguments that can be configured through `accelerate config`.' )
config_args.add_argument(
'--config_file' , type=a__ , default=a__ , help='Path to the config file to use for accelerate.' , )
config_args.add_argument(
'--tpu_name' , default=a__ , help='The name of the TPU to use. If not specified, will use the TPU specified in the config file.' , )
config_args.add_argument(
'--tpu_zone' , default=a__ , help='The zone of the TPU to use. If not specified, will use the zone specified in the config file.' , )
__SCREAMING_SNAKE_CASE = parser.add_argument_group('TPU Arguments' , 'Arguments for options ran inside the TPU.' )
pod_args.add_argument(
'--use_alpha' , action='store_true' , help='Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`.' , )
pod_args.add_argument(
'--command_file' , default=a__ , help='The path to the file containing the commands to run on the pod on startup.' , )
pod_args.add_argument(
'--command' , action='append' , nargs='+' , help='A command to run on the pod. Can be passed multiple times.' , )
pod_args.add_argument(
'--install_accelerate' , action='store_true' , help='Whether to install accelerate on the pod. Defaults to False.' , )
pod_args.add_argument(
'--accelerate_version' , default='latest' , help='The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify \'dev\' to install from GitHub.' , )
pod_args.add_argument(
'--debug' , action='store_true' , help='If set, will print the command that would be run instead of running it.' )
if subparsers is not None:
parser.set_defaults(func=a__ )
return parser
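# Note: when `subparsers` is supplied (the normal path under the main `accelerate`
# CLI), this registers as the `tpu-config` subcommand and wires its handler via
# set_defaults(func=...); called standalone, it returns a bare argparse parser.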
def __lowercase ( a__ ) -> Dict:
__SCREAMING_SNAKE_CASE = None
# Get the default from the config file if it exists.
if args.config_file is not None or os.path.isfile(a__ ):
__SCREAMING_SNAKE_CASE = load_config_from_file(args.config_file )
if not args.command_file and defaults.command_file is not None and not args.command:
__SCREAMING_SNAKE_CASE = defaults.command_file
if not args.command and defaults.commands is not None:
__SCREAMING_SNAKE_CASE = defaults.commands
if not args.tpu_name:
__SCREAMING_SNAKE_CASE = defaults.tpu_name
if not args.tpu_zone:
__SCREAMING_SNAKE_CASE = defaults.tpu_zone
if args.accelerate_version == "dev":
__SCREAMING_SNAKE_CASE = 'git+https://github.com/huggingface/accelerate.git'
elif args.accelerate_version == "latest":
__SCREAMING_SNAKE_CASE = 'accelerate -U'
elif isinstance(parse(args.accelerate_version ) , a__ ):
__SCREAMING_SNAKE_CASE = f"""accelerate=={args.accelerate_version}"""
if not args.command_file and not args.command:
raise ValueError('You must specify either a command file or a command to run on the pod.' )
if args.command_file:
with open(args.command_file , 'r' ) as f:
__SCREAMING_SNAKE_CASE = [f.read().splitlines()]
# To turn list of lists into list of strings
if isinstance(args.command[0] , a__ ):
__SCREAMING_SNAKE_CASE = [line for cmd in args.command for line in cmd]
# Default to the shared folder and install accelerate
__SCREAMING_SNAKE_CASE = ['cd /usr/share']
if args.install_accelerate:
new_cmd += [f"""pip install {args.accelerate_version}"""]
new_cmd += args.command
__SCREAMING_SNAKE_CASE = '; '.join(a__ )
# Then send it to gcloud
# Eventually try to use google-api-core to do this instead of subprocess
__SCREAMING_SNAKE_CASE = ['gcloud']
if args.use_alpha:
cmd += ["alpha"]
cmd += [
"compute",
"tpus",
"tpu-vm",
"ssh",
args.tpu_name,
"--zone",
args.tpu_zone,
"--command",
args.command,
"--worker",
"all",
]
if args.debug:
print(f"""Running {" ".join(a__ )}""" )
return
subprocess.run(a__ )
    print('Successfully set up pod.' )
def __lowercase ( ) -> Optional[Any]:
__SCREAMING_SNAKE_CASE = tpu_command_parser()
__SCREAMING_SNAKE_CASE = parser.parse_args()
tpu_command_launcher(a__ )
| 148
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
lowerCAmelCase__ : List[str] ={
'''configuration_clip''': [
'''CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''CLIPConfig''',
'''CLIPOnnxConfig''',
'''CLIPTextConfig''',
'''CLIPVisionConfig''',
],
'''processing_clip''': ['''CLIPProcessor'''],
'''tokenization_clip''': ['''CLIPTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ : Optional[int] =['''CLIPTokenizerFast''']
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ : int =['''CLIPFeatureExtractor''']
lowerCAmelCase__ : Optional[Any] =['''CLIPImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ : Dict =[
'''CLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CLIPModel''',
'''CLIPPreTrainedModel''',
'''CLIPTextModel''',
'''CLIPTextModelWithProjection''',
'''CLIPVisionModel''',
'''CLIPVisionModelWithProjection''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ : Any =[
'''TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFCLIPModel''',
'''TFCLIPPreTrainedModel''',
'''TFCLIPTextModel''',
'''TFCLIPVisionModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ : Dict =[
'''FlaxCLIPModel''',
'''FlaxCLIPPreTrainedModel''',
'''FlaxCLIPTextModel''',
'''FlaxCLIPTextPreTrainedModel''',
'''FlaxCLIPVisionModel''',
'''FlaxCLIPVisionPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
import sys
lowerCAmelCase__ : str =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
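# Note: _LazyModule defers the heavy framework imports declared in
# _import_structure until first attribute access; the TYPE_CHECKING branch above
# exists only so static analyzers and IDEs still see the real symbols.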
| 148
| 1
|
'''simple docstring'''
from statistics import mean, stdev
def lowercase_ ( lowercase__ , lowercase__ = 3 ) ->list:
_snake_case: Union[str, Any] = min(lowercase__ )
_snake_case: str = max(lowercase__ )
# normalize data
return [round((x - x_min) / (x_max - x_min) , lowercase__ ) for x in data]
def lowercase_ ( lowercase__ , lowercase__ = 3 ) ->list:
_snake_case: Union[str, Any] = mean(lowercase__ )
_snake_case: Optional[Any] = stdev(lowercase__ )
# standardize data
return [round((x - mu) / (sigma) , lowercase__ ) for x in data]
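# Minimal usage sketch (illustrative; assumes this file is run as a script).
# Because both helpers above share the obfuscated name `lowercase_`, the second
# definition shadows the first, so the call below yields rounded z-scores
# (standardization), not min-max normalization.
if __name__ == "__main__":
    print(lowercase_([2.0, 4.0, 4.0, 4.0, 5.0, 5.0, 7.0, 9.0]))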
| 708
|
'''simple docstring'''
import sys
import webbrowser
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
print('Googling.....')
A : Optional[int] = 'https://www.google.com/search?q=' + ' '.join(sys.argv[1:])
    A : Dict = requests.get(url, headers={'User-Agent': UserAgent().random})
# res.raise_for_status()
with open('project1a.html', 'wb') as out_file: # only for knowing the class
for data in res.iter_content(10_000):
out_file.write(data)
A : List[Any] = BeautifulSoup(res.text, 'html.parser')
A : List[Any] = list(soup.select('.eZt8xd'))[:5]
print(len(links))
for link in links:
if link.text == "Maps":
webbrowser.open(link.get('href'))
else:
webbrowser.open(F'https://google.com{link.get("href")}')
| 273
| 0
|
import warnings
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
a_ = logging.get_logger(__name__)
a_ = {
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/config.json',
# See all BART models at https://huggingface.co/models?filter=bart
}
class _UpperCamelCase ( __A ):
'''simple docstring'''
lowerCamelCase__ ='bart'
lowerCamelCase__ =['past_key_values']
lowerCamelCase__ ={'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}
def __init__( self : str , a : Optional[int]=5_0265 , a : Any=1024 , a : Tuple=12 , a : Optional[Any]=4096 , a : Any=16 , a : Optional[int]=12 , a : Any=4096 , a : List[Any]=16 , a : Union[str, Any]=0.0 , a : List[str]=0.0 , a : List[Any]="gelu" , a : Dict=1024 , a : Union[str, Any]=0.1 , a : Any=0.0 , a : Union[str, Any]=0.0 , a : Union[str, Any]=0.02 , a : Optional[int]=0.0 , a : Tuple=False , a : Dict=True , a : Optional[int]=3 , a : Union[str, Any]=1 , a : str=0 , a : Dict=2 , a : Any=True , a : str=2 , a : Optional[Any]=2 , **a : List[str] , ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = vocab_size
SCREAMING_SNAKE_CASE : Union[str, Any] = max_position_embeddings
SCREAMING_SNAKE_CASE : Optional[int] = d_model
SCREAMING_SNAKE_CASE : str = encoder_ffn_dim
SCREAMING_SNAKE_CASE : Any = encoder_layers
SCREAMING_SNAKE_CASE : Dict = encoder_attention_heads
SCREAMING_SNAKE_CASE : Union[str, Any] = decoder_ffn_dim
SCREAMING_SNAKE_CASE : List[str] = decoder_layers
SCREAMING_SNAKE_CASE : Dict = decoder_attention_heads
SCREAMING_SNAKE_CASE : Tuple = dropout
SCREAMING_SNAKE_CASE : int = attention_dropout
SCREAMING_SNAKE_CASE : Dict = activation_dropout
SCREAMING_SNAKE_CASE : List[str] = activation_function
SCREAMING_SNAKE_CASE : Optional[Any] = init_std
SCREAMING_SNAKE_CASE : Optional[Any] = encoder_layerdrop
SCREAMING_SNAKE_CASE : Dict = decoder_layerdrop
SCREAMING_SNAKE_CASE : Optional[Any] = classifier_dropout
SCREAMING_SNAKE_CASE : Tuple = use_cache
SCREAMING_SNAKE_CASE : str = encoder_layers
SCREAMING_SNAKE_CASE : List[str] = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
num_labels=a , pad_token_id=a , bos_token_id=a , eos_token_id=a , is_encoder_decoder=a , decoder_start_token_id=a , forced_eos_token_id=a , **a , )
# ensure backward compatibility for BART CNN models
if self.forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated" , a ):
SCREAMING_SNAKE_CASE : List[str] = self.bos_token_id
warnings.warn(
F"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. "
"The config can simply be saved and uploaded again to be fixed." )
class _UpperCamelCase ( __A ):
'''simple docstring'''
@property
def __UpperCamelCase ( self : int ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task in ["default", "seq2seq-lm"]:
SCREAMING_SNAKE_CASE : str = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
] )
if self.use_past:
SCREAMING_SNAKE_CASE : Any = {0: "batch"}
SCREAMING_SNAKE_CASE : Any = {0: "batch", 1: "past_decoder_sequence + sequence"}
else:
SCREAMING_SNAKE_CASE : Tuple = {0: "batch", 1: "decoder_sequence"}
SCREAMING_SNAKE_CASE : Tuple = {0: "batch", 1: "decoder_sequence"}
if self.use_past:
self.fill_with_past_key_values_(a , direction="inputs" )
elif self.task == "causal-lm":
# TODO: figure this case out.
SCREAMING_SNAKE_CASE : str = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
] )
if self.use_past:
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : Any = self.num_layers
for i in range(a ):
SCREAMING_SNAKE_CASE : Union[str, Any] = {0: "batch", 2: "past_sequence + sequence"}
SCREAMING_SNAKE_CASE : int = {0: "batch", 2: "past_sequence + sequence"}
else:
SCREAMING_SNAKE_CASE : int = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
] )
return common_inputs
@property
def __UpperCamelCase ( self : List[str] ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task in ["default", "seq2seq-lm"]:
SCREAMING_SNAKE_CASE : Union[str, Any] = super().outputs
else:
SCREAMING_SNAKE_CASE : str = super(a , self ).outputs
if self.use_past:
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : Tuple = self.num_layers
for i in range(a ):
SCREAMING_SNAKE_CASE : List[Any] = {0: "batch", 2: "past_sequence + sequence"}
SCREAMING_SNAKE_CASE : Any = {0: "batch", 2: "past_sequence + sequence"}
return common_outputs
def __UpperCamelCase ( self : Optional[Any] , a : PreTrainedTokenizer , a : int = -1 , a : int = -1 , a : bool = False , a : Optional[TensorType] = None , ) -> Mapping[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Union[str, Any] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
a , a , a , a , a )
# Generate decoder inputs
SCREAMING_SNAKE_CASE : str = seq_length if not self.use_past else 1
SCREAMING_SNAKE_CASE : Optional[Any] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
a , a , a , a , a )
SCREAMING_SNAKE_CASE : Dict = {F"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
SCREAMING_SNAKE_CASE : Optional[Any] = dict(**a , **a )
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : Union[str, Any] = common_inputs["input_ids"].shape
SCREAMING_SNAKE_CASE : Tuple = common_inputs["decoder_input_ids"].shape[1]
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : Dict = self.num_attention_heads
SCREAMING_SNAKE_CASE : int = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
SCREAMING_SNAKE_CASE : List[Any] = decoder_seq_length + 3
SCREAMING_SNAKE_CASE : Optional[Any] = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
SCREAMING_SNAKE_CASE : Optional[int] = torch.cat(
[common_inputs["decoder_attention_mask"], torch.ones(a , a )] , dim=1 )
SCREAMING_SNAKE_CASE : Any = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : Any = self.num_layers
SCREAMING_SNAKE_CASE : Any = min(a , a )
SCREAMING_SNAKE_CASE : Any = max(a , a ) - min_num_layers
SCREAMING_SNAKE_CASE : int = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"
for _ in range(a ):
common_inputs["past_key_values"].append(
(
torch.zeros(a ),
torch.zeros(a ),
torch.zeros(a ),
torch.zeros(a ),
) )
# TODO: test this.
SCREAMING_SNAKE_CASE : Optional[Any] = encoder_shape if remaining_side_name == "encoder" else decoder_shape
for _ in range(a , a ):
common_inputs["past_key_values"].append((torch.zeros(a ), torch.zeros(a )) )
return common_inputs
def __UpperCamelCase ( self : Any , a : PreTrainedTokenizer , a : int = -1 , a : int = -1 , a : bool = False , a : Optional[TensorType] = None , ) -> Mapping[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
a , a , a , a , a )
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : Union[str, Any] = common_inputs["input_ids"].shape
# Not using the same length for past_key_values
SCREAMING_SNAKE_CASE : Any = seqlen + 2
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : Optional[Any] = self.num_layers
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : Dict = self.num_attention_heads
SCREAMING_SNAKE_CASE : Union[str, Any] = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
SCREAMING_SNAKE_CASE : Union[str, Any] = common_inputs["attention_mask"].dtype
SCREAMING_SNAKE_CASE : Optional[int] = torch.cat(
[common_inputs["attention_mask"], torch.ones(a , a , dtype=a )] , dim=1 )
SCREAMING_SNAKE_CASE : Tuple = [
(torch.zeros(a ), torch.zeros(a )) for _ in range(a )
]
return common_inputs
def __UpperCamelCase ( self : Tuple , a : PreTrainedTokenizer , a : int = -1 , a : int = -1 , a : bool = False , a : Optional[TensorType] = None , ) -> Mapping[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Any = compute_effective_axis_dimension(
a , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
SCREAMING_SNAKE_CASE : int = tokenizer.num_special_tokens_to_add(a )
SCREAMING_SNAKE_CASE : List[str] = compute_effective_axis_dimension(
a , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=a )
# Generate dummy inputs according to compute batch and sequence
SCREAMING_SNAKE_CASE : str = [" ".join([tokenizer.unk_token] ) * seq_length] * batch_size
SCREAMING_SNAKE_CASE : Tuple = dict(tokenizer(a , return_tensors=a ) )
return common_inputs
def __UpperCamelCase ( self : Optional[int] , a : PreTrainedTokenizer , a : int = -1 , a : int = -1 , a : bool = False , a : Optional[TensorType] = None , ) -> Mapping[str, Any]:
"""simple docstring"""
if self.task in ["default", "seq2seq-lm"]:
SCREAMING_SNAKE_CASE : Optional[Any] = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
a , batch_size=a , seq_length=a , is_pair=a , framework=a )
elif self.task == "causal-lm":
SCREAMING_SNAKE_CASE : Optional[int] = self._generate_dummy_inputs_for_causal_lm(
a , batch_size=a , seq_length=a , is_pair=a , framework=a )
else:
SCREAMING_SNAKE_CASE : List[str] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
a , batch_size=a , seq_length=a , is_pair=a , framework=a )
return common_inputs
def __UpperCamelCase ( self : Optional[Any] , a : Tuple , a : Tuple , a : int , a : Tuple ) -> Tuple:
"""simple docstring"""
if self.task in ["default", "seq2seq-lm"]:
SCREAMING_SNAKE_CASE : Optional[int] = super()._flatten_past_key_values_(a , a , a , a )
else:
SCREAMING_SNAKE_CASE : Union[str, Any] = super(a , self )._flatten_past_key_values_(
a , a , a , a )
| 25
|
class SCREAMING_SNAKE_CASE_ :
'''simple docstring'''
def __init__( self : Tuple , __a : int ) ->Optional[int]:
lowerCamelCase_ : Optional[Any] = n
lowerCamelCase_ : Dict = [None] * self.n
lowerCamelCase_ : int = 0 # index of the first element
lowerCamelCase_ : str = 0
lowerCamelCase_ : Optional[Any] = 0
def __len__( self : List[str] ) ->int:
return self.size
def _lowerCAmelCase ( self : str ) ->bool:
return self.size == 0
def _lowerCAmelCase ( self : str ) ->Any:
return False if self.is_empty() else self.array[self.front]
def _lowerCAmelCase ( self : Union[str, Any] , __a : int ) ->List[Any]:
if self.size >= self.n:
raise Exception("""QUEUE IS FULL""" )
lowerCamelCase_ : Tuple = data
lowerCamelCase_ : Tuple = (self.rear + 1) % self.n
self.size += 1
return self
def _lowerCAmelCase ( self : Tuple ) ->int:
if self.size == 0:
raise Exception("""UNDERFLOW""" )
lowerCamelCase_ : str = self.array[self.front]
lowerCamelCase_ : str = None
lowerCamelCase_ : Optional[Any] = (self.front + 1) % self.n
self.size -= 1
return temp
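# Note: the obfuscation collapsed is_empty/first/enqueue/dequeue onto a single
# method name (_lowerCAmelCase), so only the final definition -- dequeue -- stays
# reachable under it; the ring arithmetic itself, (rear + 1) % n on enqueue and
# (front + 1) % n on dequeue, is the standard circular-buffer wrap-around.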
| 278
| 0
|
"""simple docstring"""
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
__lowerCamelCase :Tuple = logging.get_logger(__name__)
class A__ ( __lowercase):
"""simple docstring"""
snake_case__ : List[str] =['''input_features''', '''is_longer''']
def __init__( self: Optional[Any] , __a: str=64 , __a: List[str]=48_000 , __a: List[Any]=480 , __a: Dict=10 , __a: List[Any]=1_024 , __a: str=0.0 , __a: int=False , __a: float = 0 , __a: float = 14_000 , __a: int = None , __a: str = "fusion" , __a: str = "repeatpad" , **__a: Union[str, Any] , )-> Dict:
super().__init__(
feature_size=__a , sampling_rate=__a , padding_value=__a , return_attention_mask=__a , **__a , )
lowerCamelCase : List[Any] = top_db
lowerCamelCase : Optional[int] = truncation
lowerCamelCase : Any = padding
lowerCamelCase : Dict = fft_window_size
lowerCamelCase : Tuple = (fft_window_size >> 1) + 1
lowerCamelCase : Dict = hop_length
lowerCamelCase : Tuple = max_length_s
lowerCamelCase : Any = max_length_s * sampling_rate
lowerCamelCase : Optional[int] = sampling_rate
lowerCamelCase : str = frequency_min
lowerCamelCase : Optional[int] = frequency_max
lowerCamelCase : str = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=__a , min_frequency=__a , max_frequency=__a , sampling_rate=__a , norm=__a , mel_scale="""htk""" , )
lowerCamelCase : Optional[Any] = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=__a , min_frequency=__a , max_frequency=__a , sampling_rate=__a , norm="""slaney""" , mel_scale="""slaney""" , )
def a__ ( self: Any )-> Dict[str, Any]:
lowerCamelCase : Union[str, Any] = copy.deepcopy(self.__dict__ )
lowerCamelCase : Optional[Any] = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
if "mel_filters_slaney" in output:
del output["mel_filters_slaney"]
return output
def a__ ( self: Any , __a: np.array , __a: Optional[np.array] = None )-> np.ndarray:
lowerCamelCase : Optional[Any] = spectrogram(
__a , window_function(self.fft_window_size , """hann""" ) , frame_length=self.fft_window_size , hop_length=self.hop_length , power=2.0 , mel_filters=__a , log_mel="""dB""" , )
return log_mel_spectrogram.T
def a__ ( self: Union[str, Any] , __a: Union[str, Any] , __a: Optional[Any] , __a: Any )-> Optional[int]:
lowerCamelCase : List[str] = np.array_split(list(range(0 , total_frames - chunk_frames + 1 ) ) , 3 )
if len(ranges[1] ) == 0:
# if the audio is too short, we just use the first chunk
lowerCamelCase : Any = [0]
if len(ranges[2] ) == 0:
# if the audio is too short, we just use the first chunk
lowerCamelCase : List[Any] = [0]
# randomly choose index for each part
lowerCamelCase : int = np.random.choice(ranges[0] )
lowerCamelCase : List[str] = np.random.choice(ranges[1] )
lowerCamelCase : str = np.random.choice(ranges[2] )
lowerCamelCase : Tuple = mel[idx_front : idx_front + chunk_frames, :]
lowerCamelCase : str = mel[idx_middle : idx_middle + chunk_frames, :]
lowerCamelCase : str = mel[idx_back : idx_back + chunk_frames, :]
lowerCamelCase : Union[str, Any] = torch.tensor(mel[None, None, :] )
lowerCamelCase : Dict = torch.nn.functional.interpolate(
__a , size=[chunk_frames, 64] , mode="""bilinear""" , align_corners=__a )
lowerCamelCase : str = mel_shrink[0][0].numpy()
lowerCamelCase : Optional[int] = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] , axis=0 )
return mel_fusion
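    # Note: the fused tensor stacks four views along a new leading axis -- a
    # bilinear downsample of the full mel spectrogram plus random crops taken
    # from the front, middle and back thirds of the frame range computed above.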
def a__ ( self: Union[str, Any] , __a: np.array , __a: List[Any] , __a: str , __a: List[Any] )-> np.array:
if waveform.shape[0] > max_length:
if truncation == "rand_trunc":
lowerCamelCase : int = True
# random crop to max_length (for compatibility) -> this should be handled by self.pad
lowerCamelCase : str = len(__a ) - max_length
lowerCamelCase : Optional[Any] = np.random.randint(0 , overflow + 1 )
lowerCamelCase : Union[str, Any] = waveform[idx : idx + max_length]
lowerCamelCase : List[Any] = self._np_extract_fbank_features(__a , self.mel_filters_slaney )[None, :]
elif truncation == "fusion":
lowerCamelCase : Optional[Any] = self._np_extract_fbank_features(__a , self.mel_filters )
lowerCamelCase : Tuple = max_length // self.hop_length + 1 # the +1 related to how the spectrogram is computed
lowerCamelCase : str = mel.shape[0]
if chunk_frames == total_frames:
# there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
# In this case, we just use the whole audio.
lowerCamelCase : Any = np.stack([mel, mel, mel, mel] , axis=0 )
lowerCamelCase : List[str] = False
else:
lowerCamelCase : Any = self._random_mel_fusion(__a , __a , __a )
lowerCamelCase : Optional[Any] = True
else:
raise NotImplementedError(f'data_truncating {truncation} not implemented' )
else:
lowerCamelCase : Dict = False
# only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
if waveform.shape[0] < max_length:
if padding == "repeat":
lowerCamelCase : Tuple = int(max_length / len(__a ) )
lowerCamelCase : Dict = np.stack(np.tile(__a , n_repeat + 1 ) )[:max_length]
if padding == "repeatpad":
lowerCamelCase : Optional[Any] = int(max_length / len(__a ) )
lowerCamelCase : Optional[Any] = np.stack(np.tile(__a , __a ) )
lowerCamelCase : Optional[Any] = np.pad(__a , (0, max_length - waveform.shape[0]) , mode="""constant""" , constant_values=0 )
if truncation == "fusion":
lowerCamelCase : str = self._np_extract_fbank_features(__a , self.mel_filters )
lowerCamelCase : Optional[Any] = np.stack([input_mel, input_mel, input_mel, input_mel] , axis=0 )
else:
lowerCamelCase : Dict = self._np_extract_fbank_features(__a , self.mel_filters_slaney )[None, :]
return input_mel, longer
def __call__( self: List[Any] , __a: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , __a: str = None , __a: Optional[str] = None , __a: Optional[int] = None , __a: Optional[int] = None , __a: Optional[Union[str, TensorType]] = None , **__a: str , )-> BatchFeature:
lowerCamelCase : Optional[Any] = truncation if truncation is not None else self.truncation
lowerCamelCase : str = padding if padding else self.padding
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f'The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a'
f' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input'
f' was sampled with {self.sampling_rate} and not {sampling_rate}.' )
else:
logger.warning(
"""It is strongly recommended to pass the `sampling_rate` argument to this function. """
"""Failing to do so can result in silent errors that might be hard to debug.""" )
lowerCamelCase : int = isinstance(__a , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f'Only mono-channel audio is supported for input to {self}' )
lowerCamelCase : str = is_batched_numpy or (
isinstance(__a , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
lowerCamelCase : Tuple = [np.asarray(__a , dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(__a , np.ndarray ):
lowerCamelCase : Optional[Any] = np.asarray(__a , dtype=np.floataa )
elif isinstance(__a , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
lowerCamelCase : Union[str, Any] = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
lowerCamelCase : List[Any] = [np.asarray(__a )]
# convert to mel spectrogram, truncate and pad if needed.
lowerCamelCase : Optional[Any] = [
self._get_input_mel(__a , max_length if max_length else self.nb_max_samples , __a , __a )
for waveform in raw_speech
]
lowerCamelCase : Optional[int] = []
lowerCamelCase : Union[str, Any] = []
for mel, longer in padded_inputs:
input_mel.append(__a )
is_longer.append(__a )
if truncation == "fusion" and sum(__a ) == 0:
# if no audio is longer than 10s, then randomly select one audio to be longer
lowerCamelCase : Optional[Any] = np.random.randint(0 , len(__a ) )
lowerCamelCase : Union[str, Any] = True
if isinstance(input_mel[0] , __a ):
lowerCamelCase : List[str] = [np.asarray(__a , dtype=np.floataa ) for feature in input_mel]
# is_longer is a list of bool
lowerCamelCase : Tuple = [[longer] for longer in is_longer]
lowerCamelCase : Union[str, Any] = {"""input_features""": input_mel, """is_longer""": is_longer}
lowerCamelCase : Any = BatchFeature(__a )
if return_tensors is not None:
lowerCamelCase : str = input_features.convert_to_tensors(__a )
return input_features
| 42
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
__lowerCamelCase :List[str] = {
'configuration_owlvit': [
'OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'OwlViTConfig',
'OwlViTOnnxConfig',
'OwlViTTextConfig',
'OwlViTVisionConfig',
],
'processing_owlvit': ['OwlViTProcessor'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase :Optional[int] = ['OwlViTFeatureExtractor']
__lowerCamelCase :List[str] = ['OwlViTImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase :Optional[Any] = [
'OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'OwlViTModel',
'OwlViTPreTrainedModel',
'OwlViTTextModel',
'OwlViTVisionModel',
'OwlViTForObjectDetection',
]
if TYPE_CHECKING:
from .configuration_owlvit import (
OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OwlViTConfig,
OwlViTOnnxConfig,
OwlViTTextConfig,
OwlViTVisionConfig,
)
from .processing_owlvit import OwlViTProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_owlvit import OwlViTFeatureExtractor
from .image_processing_owlvit import OwlViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_owlvit import (
OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
OwlViTForObjectDetection,
OwlViTModel,
OwlViTPreTrainedModel,
OwlViTTextModel,
OwlViTVisionModel,
)
else:
import sys
__lowerCamelCase :Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 42
| 1
|
'''simple docstring'''
import random
def a_ ( __UpperCAmelCase ) -> bool:
"""simple docstring"""
snake_case: Dict =num - 1
snake_case: Any =0
while s % 2 == 0:
snake_case: Any =s // 2
t += 1
for _ in range(5 ):
snake_case: Any =random.randrange(2 , num - 1 )
snake_case: Dict =pow(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
if v != 1:
snake_case: Dict =0
while v != (num - 1):
if i == t - 1:
return False
else:
snake_case: Any =i + 1
snake_case: Union[str, Any] =(v**2) % num
return True
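# Worked example (illustrative): for num = 13, num - 1 = 12 = 2**2 * 3, so s = 3
# and t = 2. A base a passes the round if pow(a, 3, 13) == 1 or if repeated
# squaring hits 12 (== num - 1) within t - 1 steps; five random bases bound the
# false-positive probability by 4**-5.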
def a_ ( __UpperCAmelCase ) -> bool:
"""simple docstring"""
if num < 2:
return False
snake_case: Union[str, Any] =[
2,
3,
5,
7,
11,
13,
17,
19,
23,
29,
31,
37,
41,
43,
47,
53,
59,
61,
67,
71,
73,
79,
83,
89,
97,
1_01,
1_03,
1_07,
1_09,
1_13,
1_27,
1_31,
1_37,
1_39,
1_49,
1_51,
1_57,
1_63,
1_67,
1_73,
1_79,
1_81,
1_91,
1_93,
1_97,
1_99,
2_11,
2_23,
2_27,
2_29,
2_33,
2_39,
2_41,
2_51,
2_57,
2_63,
2_69,
2_71,
2_77,
2_81,
2_83,
2_93,
3_07,
3_11,
3_13,
3_17,
3_31,
3_37,
3_47,
3_49,
3_53,
3_59,
3_67,
3_73,
3_79,
3_83,
3_89,
3_97,
4_01,
4_09,
4_19,
4_21,
4_31,
4_33,
4_39,
4_43,
4_49,
4_57,
4_61,
4_63,
4_67,
4_79,
4_87,
4_91,
4_99,
5_03,
5_09,
5_21,
5_23,
5_41,
5_47,
5_57,
5_63,
5_69,
5_71,
5_77,
5_87,
5_93,
5_99,
6_01,
6_07,
6_13,
6_17,
6_19,
6_31,
6_41,
6_43,
6_47,
6_53,
6_59,
6_61,
6_73,
6_77,
6_83,
6_91,
7_01,
7_09,
7_19,
7_27,
7_33,
7_39,
7_43,
7_51,
7_57,
7_61,
7_69,
7_73,
7_87,
7_97,
8_09,
8_11,
8_21,
8_23,
8_27,
8_29,
8_39,
8_53,
8_57,
8_59,
8_63,
8_77,
8_81,
8_83,
8_87,
9_07,
9_11,
9_19,
9_29,
9_37,
9_41,
9_47,
9_53,
9_67,
9_71,
9_77,
9_83,
9_91,
9_97,
]
if num in low_primes:
return True
for prime in low_primes:
if (num % prime) == 0:
return False
return rabin_miller(__UpperCAmelCase )
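# Note: trial division against the hard-coded primes below 1000 cheaply screens
# out most candidates before falling back to the probabilistic Rabin-Miller
# rounds above.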
def a_ ( __UpperCAmelCase = 10_24 ) -> int:
"""simple docstring"""
while True:
snake_case: Union[str, Any] =random.randrange(2 ** (keysize - 1) , 2 ** (keysize) )
if is_prime_low_num(__UpperCAmelCase ):
return num
if __name__ == "__main__":
a = generate_large_prime()
print(('Prime number:', num))
print(('is_prime_low_num:', is_prime_low_num(num)))
| 350
|
'''simple docstring'''
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
if is_datasets_available():
from datasets import load_dataset
a = random.Random()
def a_ ( __UpperCAmelCase , __UpperCAmelCase=1.0 , __UpperCAmelCase=None , __UpperCAmelCase=None ) -> List[str]:
"""simple docstring"""
if rng is None:
snake_case: Dict =global_rng
snake_case: Tuple =[]
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
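# Note: floats_list builds a shape[0] x shape[1] nested list of random floats in
# [0, scale), used below as stand-in raw audio for the feature extractor tests.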
class a_ ( unittest.TestCase ):
def __init__( self : int , a_ : Any , a_ : str=7 , a_ : Tuple=4_0_0 , a_ : List[Any]=2_0_0_0 , a_ : str=2_0_4_8 , a_ : List[str]=1_2_8 , a_ : int=1 , a_ : Tuple=5_1_2 , a_ : Dict=3_0 , a_ : Optional[int]=4_4_1_0_0 , ) -> Union[str, Any]:
snake_case: Union[str, Any] =parent
snake_case: Optional[Any] =batch_size
snake_case: Union[str, Any] =min_seq_length
snake_case: List[str] =max_seq_length
snake_case: str =(self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
snake_case: int =spectrogram_length
snake_case: List[str] =feature_size
snake_case: Dict =num_audio_channels
snake_case: int =hop_length
snake_case: List[Any] =chunk_length
snake_case: Optional[Any] =sampling_rate
def UpperCamelCase ( self : Any ) -> Dict:
return {
"spectrogram_length": self.spectrogram_length,
"feature_size": self.feature_size,
"num_audio_channels": self.num_audio_channels,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"sampling_rate": self.sampling_rate,
}
def UpperCamelCase ( self : Tuple , a_ : Optional[Any]=False , a_ : Tuple=False ) -> Optional[int]:
def _flatten(a_ : Dict ):
return list(itertools.chain(*a_ ) )
if equal_length:
snake_case: Union[str, Any] =[floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
snake_case: Union[str, Any] =[
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
snake_case: Any =[np.asarray(a_ ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class a_ ( snake_case , unittest.TestCase ):
UpperCAmelCase : Optional[Any] = TvltFeatureExtractor
def UpperCamelCase ( self : int ) -> int:
snake_case: Union[str, Any] =TvltFeatureExtractionTester(self )
def UpperCamelCase ( self : Optional[Any] ) -> str:
snake_case: int =self.feature_extraction_class(**self.feat_extract_dict )
self.assertTrue(hasattr(a_ , 'spectrogram_length' ) )
self.assertTrue(hasattr(a_ , 'feature_size' ) )
self.assertTrue(hasattr(a_ , 'num_audio_channels' ) )
self.assertTrue(hasattr(a_ , 'hop_length' ) )
self.assertTrue(hasattr(a_ , 'chunk_length' ) )
self.assertTrue(hasattr(a_ , 'sampling_rate' ) )
def UpperCamelCase ( self : Tuple ) -> Optional[int]:
snake_case: Tuple =self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case: Optional[Any] =feat_extract_first.save_pretrained(a_ )[0]
check_json_file_has_correct_format(a_ )
snake_case: str =self.feature_extraction_class.from_pretrained(a_ )
snake_case: Optional[int] =feat_extract_first.to_dict()
snake_case: List[str] =feat_extract_second.to_dict()
snake_case: Optional[int] =dict_first.pop('mel_filters' )
snake_case: List[Any] =dict_second.pop('mel_filters' )
self.assertTrue(np.allclose(a_ , a_ ) )
self.assertEqual(a_ , a_ )
def UpperCamelCase ( self : str ) -> str:
snake_case: Any =self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case: int =os.path.join(a_ , 'feat_extract.json' )
feat_extract_first.to_json_file(a_ )
snake_case: Union[str, Any] =self.feature_extraction_class.from_json_file(a_ )
snake_case: Optional[Any] =feat_extract_first.to_dict()
snake_case: Optional[Any] =feat_extract_second.to_dict()
snake_case: Union[str, Any] =dict_first.pop('mel_filters' )
snake_case: Union[str, Any] =dict_second.pop('mel_filters' )
self.assertTrue(np.allclose(a_ , a_ ) )
self.assertEqual(a_ , a_ )
def UpperCamelCase ( self : Union[str, Any] ) -> List[str]:
# Initialize feature_extractor
snake_case: Tuple =self.feature_extraction_class(**self.feat_extract_dict )
# create three inputs of length 800, 1000, and 1200
snake_case: Any =[floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
snake_case: Optional[Any] =[np.asarray(a_ ) for speech_input in speech_inputs]
# Test not batched input
snake_case: List[Any] =feature_extractor(np_speech_inputs[0] , return_tensors='np' , sampling_rate=4_4_1_0_0 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test batched
snake_case: Any =feature_extractor(a_ , return_tensors='np' , sampling_rate=4_4_1_0_0 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test audio masking
snake_case: Optional[Any] =feature_extractor(
a_ , return_tensors='np' , sampling_rate=4_4_1_0_0 , mask_audio=a_ ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test 2-D numpy arrays are batched.
snake_case: List[Any] =[floats_list((1, x) )[0] for x in (8_0_0, 8_0_0, 8_0_0)]
snake_case: List[str] =np.asarray(a_ )
snake_case: str =feature_extractor(a_ , return_tensors='np' , sampling_rate=4_4_1_0_0 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
def UpperCamelCase ( self : Any , a_ : str ) -> Union[str, Any]:
snake_case: Dict =load_dataset('hf-internal-testing/librispeech_asr_dummy' , 'clean' , split='validation' )
# automatic decoding with librispeech
snake_case: Optional[int] =ds.sort('id' ).select(range(a_ ) )[:num_samples]['audio']
return [x["array"] for x in speech_samples]
def UpperCamelCase ( self : Optional[Any] ) -> Optional[int]:
snake_case: Tuple =self._load_datasamples(1 )
snake_case: Any =TvltFeatureExtractor()
snake_case: Any =feature_extractor(a_ , return_tensors='pt' ).audio_values
        self.assertEqual(audio_values.shape , (1, 1, 1_9_2, 1_2_8) )
snake_case: int =torch.tensor([[-0.3_0_3_2, -0.2_7_0_8], [-0.4_4_3_4, -0.4_0_0_7]] )
self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2] , a_ , atol=1E-4 ) )
| 350
| 1
|
'''simple docstring'''
import json
import os
from typing import Optional
import numpy as np
from ...feature_extraction_utils import BatchFeature
from ...processing_utils import ProcessorMixin
from ...utils import logging
from ...utils.hub import get_file_from_repo
from ..auto import AutoTokenizer
lowerCAmelCase_ = logging.get_logger(__name__)
class UpperCAmelCase_ ( lowerCamelCase_ ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE = '''AutoTokenizer'''
__SCREAMING_SNAKE_CASE = ['''tokenizer''']
__SCREAMING_SNAKE_CASE = {
'''semantic_prompt''': 1,
'''coarse_prompt''': 2,
'''fine_prompt''': 2,
}
def __init__( self , lowerCamelCase , lowerCamelCase=None ) -> Dict:
'''simple docstring'''
super().__init__(lowerCamelCase )
UpperCamelCase : List[Any] = speaker_embeddings
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls , lowerCamelCase , lowerCamelCase="speaker_embeddings_path.json" , **lowerCamelCase ) -> List[str]:
'''simple docstring'''
if speaker_embeddings_dict_path is not None:
UpperCamelCase : Any = get_file_from_repo(
lowerCamelCase , lowerCamelCase , subfolder=kwargs.pop("subfolder" , lowerCamelCase ) , cache_dir=kwargs.pop("cache_dir" , lowerCamelCase ) , force_download=kwargs.pop("force_download" , lowerCamelCase ) , proxies=kwargs.pop("proxies" , lowerCamelCase ) , resume_download=kwargs.pop("resume_download" , lowerCamelCase ) , local_files_only=kwargs.pop("local_files_only" , lowerCamelCase ) , use_auth_token=kwargs.pop("use_auth_token" , lowerCamelCase ) , revision=kwargs.pop("revision" , lowerCamelCase ) , )
if speaker_embeddings_path is None:
logger.warning(
                    f'''`{os.path.join(lowerCamelCase , lowerCamelCase )}` does not exist,
                    no preloaded speaker embeddings will be used - make sure to provide a correct path to the json
                    dictionary if wanted, otherwise set `speaker_embeddings_dict_path=None`.''' )
UpperCamelCase : Tuple = None
else:
with open(lowerCamelCase ) as speaker_embeddings_json:
UpperCamelCase : List[str] = json.load(lowerCamelCase )
else:
UpperCamelCase : str = None
UpperCamelCase : Optional[int] = AutoTokenizer.from_pretrained(lowerCamelCase , **lowerCamelCase )
return cls(tokenizer=lowerCamelCase , speaker_embeddings=lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self , lowerCamelCase , lowerCamelCase="speaker_embeddings_path.json" , lowerCamelCase="speaker_embeddings" , lowerCamelCase = False , **lowerCamelCase , ) -> Any:
'''simple docstring'''
if self.speaker_embeddings is not None:
os.makedirs(os.path.join(lowerCamelCase , lowerCamelCase , "v2" ) , exist_ok=lowerCamelCase )
UpperCamelCase : Optional[int] = {}
UpperCamelCase : Optional[int] = save_directory
for prompt_key in self.speaker_embeddings:
if prompt_key != "repo_or_path":
UpperCamelCase : List[Any] = self._load_voice_preset(lowerCamelCase )
UpperCamelCase : Optional[Any] = {}
for key in self.speaker_embeddings[prompt_key]:
np.save(
os.path.join(
embeddings_dict["repo_or_path"] , lowerCamelCase , f'''{prompt_key}_{key}''' ) , voice_preset[key] , allow_pickle=lowerCamelCase , )
UpperCamelCase : Any = os.path.join(lowerCamelCase , f'''{prompt_key}_{key}.npy''' )
UpperCamelCase : Union[str, Any] = tmp_dict
with open(os.path.join(lowerCamelCase , lowerCamelCase ) , "w" ) as fp:
json.dump(lowerCamelCase , lowerCamelCase )
super().save_pretrained(lowerCamelCase , lowerCamelCase , **lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self , lowerCamelCase = None , **lowerCamelCase ) -> Any:
'''simple docstring'''
UpperCamelCase : List[str] = self.speaker_embeddings[voice_preset]
UpperCamelCase : int = {}
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset_paths:
raise ValueError(
f'''Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}].''' )
UpperCamelCase : Optional[Any] = get_file_from_repo(
self.speaker_embeddings.get("repo_or_path" , "/" ) , voice_preset_paths[key] , subfolder=kwargs.pop("subfolder" , lowerCamelCase ) , cache_dir=kwargs.pop("cache_dir" , lowerCamelCase ) , force_download=kwargs.pop("force_download" , lowerCamelCase ) , proxies=kwargs.pop("proxies" , lowerCamelCase ) , resume_download=kwargs.pop("resume_download" , lowerCamelCase ) , local_files_only=kwargs.pop("local_files_only" , lowerCamelCase ) , use_auth_token=kwargs.pop("use_auth_token" , lowerCamelCase ) , revision=kwargs.pop("revision" , lowerCamelCase ) , )
if path is None:
raise ValueError(
                f'''`{os.path.join(self.speaker_embeddings.get('repo_or_path' , '/' ) , voice_preset_paths[key] )}` does not exist,
                no preloaded voice preset will be used - make sure to provide correct paths to the {voice_preset}
                embeddings.''' )
UpperCamelCase : List[str] = np.load(lowerCamelCase )
return voice_preset_dict
def SCREAMING_SNAKE_CASE__ ( self , lowerCamelCase = None ) -> Any:
'''simple docstring'''
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset:
raise ValueError(f'''Voice preset unrecognized, missing {key} as a key.''' )
if not isinstance(voice_preset[key] , np.ndarray ):
raise ValueError(f'''{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.''' )
if len(voice_preset[key].shape ) != self.preset_shape[key]:
raise ValueError(f'''{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.''' )
def __call__( self , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase="pt" , lowerCamelCase=2_56 , lowerCamelCase=False , lowerCamelCase=True , lowerCamelCase=False , **lowerCamelCase , ) -> Optional[Any]:
'''simple docstring'''
if voice_preset is not None and not isinstance(lowerCamelCase , lowerCamelCase ):
if (
isinstance(lowerCamelCase , lowerCamelCase )
and self.speaker_embeddings is not None
and voice_preset in self.speaker_embeddings
):
UpperCamelCase : Optional[Any] = self._load_voice_preset(lowerCamelCase )
else:
if isinstance(lowerCamelCase , lowerCamelCase ) and not voice_preset.endswith(".npz" ):
UpperCamelCase : List[Any] = voice_preset + ".npz"
UpperCamelCase : Optional[Any] = np.load(lowerCamelCase )
if voice_preset is not None:
self._validate_voice_preset_dict(lowerCamelCase , **lowerCamelCase )
UpperCamelCase : List[str] = BatchFeature(data=lowerCamelCase , tensor_type=lowerCamelCase )
UpperCamelCase : List[Any] = self.tokenizer(
lowerCamelCase , return_tensors=lowerCamelCase , padding="max_length" , max_length=lowerCamelCase , return_attention_mask=lowerCamelCase , return_token_type_ids=lowerCamelCase , add_special_tokens=lowerCamelCase , **lowerCamelCase , )
if voice_preset is not None:
UpperCamelCase : Optional[Any] = voice_preset
return encoded_text
| 707
|
'''simple docstring'''
from __future__ import annotations
import math
def A__ ( A : int):
'''simple docstring'''
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 , int(math.sqrt(A) + 1) , 6):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
lowerCAmelCase_ = [num for num in range(3, 10_0001, 2) if not is_prime(num)]
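# Project Euler 46 ("Goldbach's other conjecture"): every odd composite was
# conjectured to be a prime plus twice a square. compute_nums below collects the
# first n odd composites with no such decomposition, and solution() returns the
# smallest one.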
def A__ ( A : int):
'''simple docstring'''
if not isinstance(A , A):
raise ValueError("n must be an integer")
if n <= 0:
raise ValueError("n must be >= 0")
    UpperCamelCase : Union[str, Any] = []
    for num in range(len(odd_composites)):
        UpperCamelCase : Any = 0
        while 2 * i * i <= odd_composites[num]:
            UpperCamelCase : str = odd_composites[num] - 2 * i * i
            if is_prime(rem):
                break
            i += 1
        else:
            list_nums.append(odd_composites[num])
            if len(list_nums) == n:
                return list_nums
return []
def A__ ( ):
'''simple docstring'''
return compute_nums(1)[0]
if __name__ == "__main__":
print(f"""{solution() = }""")
| 435
| 0
|
import os
from collections import namedtuple
import pytest
from datasets import ClassLabel, Features, Sequence, Value
from datasets.commands.test import TestCommand
from datasets.info import DatasetInfo, DatasetInfosDict
__UpperCAmelCase = namedtuple(
"""_TestCommandArgs""",
[
"""dataset""",
"""name""",
"""cache_dir""",
"""data_dir""",
"""all_configs""",
"""save_infos""",
"""ignore_verifications""",
"""force_redownload""",
"""clear_cache""",
],
defaults=[None, None, None, False, False, False, False, False],
)
def snake_case_ (__A : Union[str, Any] , __A : str ) -> Any:
return (abs(source - target ) / target) < 0.01
@pytest.mark.integration
def snake_case_ (__A : Tuple ) -> Optional[Any]:
__lowerCAmelCase : int = _TestCommandArgs(dataset=__A , all_configs=__A , save_infos=__A )
__lowerCAmelCase : Optional[int] = TestCommand(*__A )
test_command.run()
__lowerCAmelCase : List[str] = os.path.join(__A , """README.md""" )
assert os.path.exists(__A )
__lowerCAmelCase : Any = DatasetInfosDict.from_directory(__A )
__lowerCAmelCase : int = DatasetInfosDict(
{
"""default""": DatasetInfo(
features=Features(
{
"""tokens""": Sequence(Value("""string""" ) ),
"""ner_tags""": Sequence(
ClassLabel(names=["""O""", """B-PER""", """I-PER""", """B-ORG""", """I-ORG""", """B-LOC""", """I-LOC"""] ) ),
"""langs""": Sequence(Value("""string""" ) ),
"""spans""": Sequence(Value("""string""" ) ),
} ) , splits=[
{
"""name""": """train""",
"""num_bytes""": 2_3_5_1_5_6_3,
"""num_examples""": 1_0_0_0_0,
},
{
"""name""": """validation""",
"""num_bytes""": 2_3_8_4_1_8,
"""num_examples""": 1_0_0_0,
},
] , download_size=3_9_4_0_6_8_0 , dataset_size=2_5_8_9_9_8_1 , )
} )
assert dataset_infos.keys() == expected_dataset_infos.keys()
for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
__lowerCAmelCase : List[str] = getattr(dataset_infos["""default"""] , __A ), getattr(expected_dataset_infos["""default"""] , __A )
if key == "num_bytes":
assert is_apercent_close(__A , __A )
elif key == "splits":
assert list(__A ) == list(__A )
for split in result:
assert result[split].name == expected[split].name
assert result[split].num_examples == expected[split].num_examples
assert is_apercent_close(result[split].num_bytes , expected[split].num_bytes )
else:
            assert result == expected
| 651
|
'''simple docstring'''
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class __lowerCAmelCase :
'''simple docstring'''
a_ = 42
a_ = 42
class __lowerCAmelCase :
'''simple docstring'''
def __init__( self : Union[str, Any] ,_a : int ):
'''simple docstring'''
A_ : list[list[Edge]] = [[] for _ in range(_a )]
A_ : List[Any] = size
def __getitem__( self : int ,_a : int ):
'''simple docstring'''
return iter(self._graph[vertex] )
@property
def _a ( self : str ):
'''simple docstring'''
return self._size
def _a ( self : str ,_a : int ,_a : int ,_a : int ):
'''simple docstring'''
if weight not in (0, 1):
raise ValueError("""Edge weight must be either 0 or 1.""" )
if to_vertex < 0 or to_vertex >= self.size:
raise ValueError("""Vertex indexes must be in [0; size).""" )
self._graph[from_vertex].append(Edge(_a ,_a ) )
def _a ( self : Dict ,_a : int ,_a : int ):
'''simple docstring'''
A_ : Tuple = deque([start_vertex] )
A_ : list[int | None] = [None] * self.size
A_ : Union[str, Any] = 0
while queue:
A_ : List[Any] = queue.popleft()
A_ : Tuple = distances[current_vertex]
if current_distance is None:
continue
for edge in self[current_vertex]:
A_ : Union[str, Any] = current_distance + edge.weight
A_ : Optional[Any] = distances[edge.destination_vertex]
if (
isinstance(_a ,_a )
and new_distance >= dest_vertex_distance
):
continue
A_ : Tuple = new_distance
if edge.weight == 0:
queue.appendleft(edge.destination_vertex )
else:
queue.append(edge.destination_vertex )
if distances[finish_vertex] is None:
raise ValueError("""No path from start_vertex to finish_vertex.""" )
return distances[finish_vertex]
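# Note on the algorithm: this is 0-1 BFS -- weight-0 edges are pushed to the
# front of the deque and weight-1 edges to the back, so vertices leave the deque
# in nondecreasing distance order and shortest paths cost O(V + E) overall.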
if __name__ == "__main__":
import doctest
doctest.testmod()
| 665
| 0
|
'''simple docstring'''
from __future__ import annotations
import os
from typing import Any
import requests
_A : int ='''https://api.github.com'''
# https://docs.github.com/en/free-pro-team@latest/rest/reference/users#get-the-authenticated-user
_A : int =BASE_URL + '''/user'''
# https://github.com/settings/tokens
_A : List[Any] =os.environ.get('''USER_TOKEN''', '''''')
def __UpperCamelCase ( _lowercase ) -> dict[Any, Any]:
_lowercase : int = {
'Authorization': f'''token {auth_token}''',
'Accept': 'application/vnd.github.v3+json',
}
return requests.get(_lowercase, headers=_lowercase ).json()
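# Sketch of the expected output (assumes a valid classic personal access token
# in USER_TOKEN): GET /user returns the authenticated profile as JSON, e.g. with
# "login", "id" and "name" keys.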
if __name__ == "__main__": # pragma: no cover
if USER_TOKEN:
for key, value in fetch_github_info(USER_TOKEN).items():
print(F'''{key}: {value}''')
else:
raise ValueError('''\'USER_TOKEN\' field cannot be empty.''')
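# Usage sketch (token value hypothetical): set USER_TOKEN=<personal-access-token>
# in the environment and run this script; it prints the authenticated user's
# profile fields (login, name, public_repos, ...) as returned by the GitHub API.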
| 4
|
'''simple docstring'''
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEmbeddings,
BertLayer,
BertPooler,
BertPreTrainedModel,
)
def entropy(x):
    """Calculate the entropy of the softmax distribution induced by a batch of logits x."""
    exp_x = torch.exp(x)
    A = torch.sum(exp_x, dim=1)  # sum of exp(x_i)
    B = torch.sum(x * exp_x, dim=1)  # sum of x_i * exp(x_i)
    return torch.log(A) - B / A
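# Why this equals the softmax entropy: with p_i = exp(x_i) / A and A = sum_j exp(x_j),
# H(p) = -sum_i p_i * log(p_i) = -sum_i p_i * (x_i - log A) = log A - (sum_i x_i * exp(x_i)) / A,
# i.e. exactly torch.log(A) - B / A as computed above.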
class DeeBertEncoder(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.output_attentions = config.output_attentions
        self.output_hidden_states = config.output_hidden_states
        self.layer = nn.ModuleList([BertLayer(config) for _ in range(config.num_hidden_layers)])
        self.highway = nn.ModuleList([BertHighway(config) for _ in range(config.num_hidden_layers)])
        self.early_exit_entropy = [-1 for _ in range(config.num_hidden_layers)]
    def set_early_exit_entropy(self, x):
        if (type(x) is float) or (type(x) is int):
            for i in range(len(self.early_exit_entropy)):
                self.early_exit_entropy[i] = x
        else:
            self.early_exit_entropy = x

    def init_highway_pooler(self, pooler):
        loaded_model = pooler.state_dict()
        for highway in self.highway:
            for name, param in highway.pooler.state_dict().items():
                param.copy_(loaded_model[name])
    def forward(
        self,
        hidden_states,
        attention_mask=None,
        head_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
    ):
        all_hidden_states = ()
        all_attentions = ()
        all_highway_exits = ()
        for i, layer_module in enumerate(self.layer):
            if self.output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

            layer_outputs = layer_module(
                hidden_states, attention_mask, head_mask[i], encoder_hidden_states, encoder_attention_mask
            )
            hidden_states = layer_outputs[0]

            if self.output_attentions:
                all_attentions = all_attentions + (layer_outputs[1],)

            current_outputs = (hidden_states,)
            if self.output_hidden_states:
                current_outputs = current_outputs + (all_hidden_states,)
            if self.output_attentions:
                current_outputs = current_outputs + (all_attentions,)

            highway_exit = self.highway[i](current_outputs)
            # logits, pooled_output

            if not self.training:
                highway_logits = highway_exit[0]
                highway_entropy = entropy(highway_logits)
                highway_exit = highway_exit + (highway_entropy,)  # logits, hidden_states(?), entropy
                all_highway_exits = all_highway_exits + (highway_exit,)

                if highway_entropy < self.early_exit_entropy[i]:
                    new_output = (highway_logits,) + current_outputs[1:] + (all_highway_exits,)
                    raise HighwayException(new_output, i + 1)
            else:
                all_highway_exits = all_highway_exits + (highway_exit,)

        # Add last layer
        if self.output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        outputs = (hidden_states,)
        if self.output_hidden_states:
            outputs = outputs + (all_hidden_states,)
        if self.output_attentions:
            outputs = outputs + (all_attentions,)

        outputs = outputs + (all_highway_exits,)
        return outputs  # last-layer hidden state, (all hidden states), (all attentions), all highway exits
@add_start_docstrings(
    "The Bert Model transformer with early exiting (DeeBERT). ",
    BERT_START_DOCSTRING,
)
class DeeBertModel(BertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config

        self.embeddings = BertEmbeddings(config)
        self.encoder = DeeBertEncoder(config)
        self.pooler = BertPooler(config)

        self.init_weights()
    def init_highway_pooler(self):
        self.encoder.init_highway_pooler(self.pooler)

    def get_input_embeddings(self):
        return self.embeddings.word_embeddings

    def set_input_embeddings(self, value):
        self.embeddings.word_embeddings = value

    def _prune_heads(self, heads_to_prune):
        """Prune heads of the model; heads_to_prune maps layer number -> heads to prune in that layer."""
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads)
    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
    ):
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        device = input_ids.device if input_ids is not None else inputs_embeds.device

        if attention_mask is None:
            attention_mask = torch.ones(input_shape, device=device)
        if encoder_attention_mask is None:
            encoder_attention_mask = torch.ones(input_shape, device=device)
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)

        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape, device)

        # If a 2D or 3D attention mask is provided for the cross-attention
        # we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if encoder_attention_mask.dim() == 3:
            encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
        if encoder_attention_mask.dim() == 2:
            encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
        encoder_extended_attention_mask = encoder_extended_attention_mask.to(
            dtype=next(self.parameters()).dtype
        )  # fp16 compatibility
        encoder_extended_attention_mask = (1.0 - encoder_extended_attention_mask) * -10000.0

        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        embedding_output = self.embeddings(
            input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds
        )
        encoder_outputs = self.encoder(
            embedding_output,
            attention_mask=extended_attention_mask,
            head_mask=head_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_extended_attention_mask,
        )
        sequence_output = encoder_outputs[0]
        pooled_output = self.pooler(sequence_output)

        outputs = (
            sequence_output,
            pooled_output,
        ) + encoder_outputs[1:]  # add hidden_states and attentions if they are here
        return outputs  # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
class HighwayException(Exception):
    def __init__(self, message, exit_layer):
        self.message = message
        self.exit_layer = exit_layer  # start from 1!
class BertHighway(nn.Module):
    """A module to provide a shortcut from the output of one non-final BertLayer to cross-entropy computation."""

    def __init__(self, config):
        super().__init__()
        self.pooler = BertPooler(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)

    def forward(self, encoder_outputs):
        # Pooler
        pooler_input = encoder_outputs[0]
        pooler_output = self.pooler(pooler_input)
        # "return" pooler_output

        # BertModel
        bmodel_output = (pooler_input, pooler_output) + encoder_outputs[1:]
        # "return" bmodel_output

        # Dropout and classification
        pooled_output = bmodel_output[1]

        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)

        return logits, pooled_output
@add_start_docstrings(
    """Bert Model (with early exiting - DeeBERT) with a classifier on top,
    also takes care of multi-layer training. """,
    BERT_START_DOCSTRING,
)
class DeeBertForSequenceClassification(BertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.num_layers = config.num_hidden_layers

        self.bert = DeeBertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)

        self.init_weights()

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_layer=-1,
        train_highway=False,
    ):
        exit_layer = self.num_layers
        try:
            outputs = self.bert(
                input_ids,
                attention_mask=attention_mask,
                token_type_ids=token_type_ids,
                position_ids=position_ids,
                head_mask=head_mask,
                inputs_embeds=inputs_embeds,
            )
            # sequence_output, pooled_output, (hidden_states), (attentions), highway exits

            pooled_output = outputs[1]

            pooled_output = self.dropout(pooled_output)
            logits = self.classifier(pooled_output)
            outputs = (logits,) + outputs[2:]  # add hidden states and attention if they are here
        except HighwayException as e:
            outputs = e.message
            exit_layer = e.exit_layer
            logits = outputs[0]

        if not self.training:
            original_entropy = entropy(logits)
            highway_entropy = []
            highway_logits_all = []
        if labels is not None:
            if self.num_labels == 1:
                # We are doing regression
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

            # work with highway exits
            highway_losses = []
            for highway_exit in outputs[-1]:
                highway_logits = highway_exit[0]
                if not self.training:
                    highway_logits_all.append(highway_logits)
                    highway_entropy.append(highway_exit[2])
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    highway_loss = loss_fct(highway_logits.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    highway_loss = loss_fct(highway_logits.view(-1, self.num_labels), labels.view(-1))
                highway_losses.append(highway_loss)

            if train_highway:
                outputs = (sum(highway_losses[:-1]),) + outputs
                # exclude the final highway, of course
            else:
                outputs = (loss,) + outputs
        if not self.training:
            outputs = outputs + ((original_entropy, highway_entropy), exit_layer)
            if output_layer >= 0:
                outputs = (
                    (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
                )  # use the highway of the last layer

        return outputs  # (loss), logits, (hidden_states), (attentions), (highway_exits)
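# Usage sketch (hedged - DeeBERT is normally fine-tuned from a BERT checkpoint,
# and the per-layer early-exit entropy thresholds are set after training):
#
#     from transformers import BertConfig
#     config = BertConfig.from_pretrained("bert-base-uncased", num_labels=2)
#     model = DeeBertForSequenceClassification(config)
#     model.bert.encoder.set_early_exit_entropy(0.5)  # exit early when entropy < 0.5
#     loss, logits = model(input_ids, labels=labels)[:2]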
| 4
| 1
|
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class Summarization(TaskTemplate):
    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default="summarization", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({"summary": Value("string")})
    text_column: str = "text"
    summary_column: str = "summary"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.text_column: "text", self.summary_column: "summary"}
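# Usage sketch (hedged - `prepare_for_task` is the Dataset API this template plugs into,
# and the xsum column names are illustrative):
#
#     from datasets import load_dataset
#     dset = load_dataset("xsum", split="train")
#     dset = dset.prepare_for_task(Summarization(text_column="document", summary_column="summary"))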
| 35
|
import argparse
import json
import logging
import os
import sys
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, get_gpu_count, slow
SRC_DIRS = [
os.path.join(os.path.dirname(__file__), dirname)
for dirname in [
"""text-classification""",
"""language-modeling""",
"""summarization""",
"""token-classification""",
"""question-answering""",
]
]
sys.path.extend(SRC_DIRS)
if SRC_DIRS is not None:
import run_clm_flax
import run_flax_glue
import run_flax_ner
import run_mlm_flax
import run_qa
import run_summarization_flax
    import run_t5_mlm_flax
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def get_setup_file() -> str:
    parser = argparse.ArgumentParser()
    parser.add_argument("-f")
    args = parser.parse_args()
    return args.f
def get_results(output_dir, split="eval"):
    path = os.path.join(output_dir, f"{split}_results.json")
    if os.path.exists(path):
        with open(path, "r") as f:
            return json.load(f)
    raise ValueError(f"can't find {path}")
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class ExamplesTests(TestCasePlus):
    def test_run_glue(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
run_glue.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--eval_steps=2
--warmup_steps=2
--seed=42
--max_seq_length=128
'''.split()
        with patch.object(sys, "argv", testargs):
            run_flax_glue.main()
            result = get_results(tmp_dir)
self.assertGreaterEqual(result["""eval_accuracy"""] , 0.75 )
    @slow
    def test_run_clm(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
run_clm_flax.py
--model_name_or_path distilgpt2
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--block_size 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
'''.split()
        with patch.object(sys, "argv", testargs):
            run_clm_flax.main()
            result = get_results(tmp_dir)
self.assertLess(result["""eval_perplexity"""] , 1_00 )
    @slow
    def test_run_summarization(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
run_summarization.py
--model_name_or_path t5-small
--train_file tests/fixtures/tests_samples/xsum/sample.json
--validation_file tests/fixtures/tests_samples/xsum/sample.json
--test_file tests/fixtures/tests_samples/xsum/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=8
--do_train
--do_eval
--do_predict
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--predict_with_generate
'''.split()
        with patch.object(sys, "argv", testargs):
            run_summarization_flax.main()
            result = get_results(tmp_dir, split="test")
self.assertGreaterEqual(result["""test_rouge1"""] , 10 )
self.assertGreaterEqual(result["""test_rouge2"""] , 2 )
self.assertGreaterEqual(result["""test_rougeL"""] , 7 )
self.assertGreaterEqual(result["""test_rougeLsum"""] , 7 )
    @slow
    def test_run_mlm(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
run_mlm.py
--model_name_or_path distilroberta-base
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--output_dir {tmp_dir}
--overwrite_output_dir
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--logging_steps 2 --eval_steps 2
--do_train
--do_eval
--num_train_epochs=1
'''.split()
        with patch.object(sys, "argv", testargs):
            run_mlm_flax.main()
            result = get_results(tmp_dir)
self.assertLess(result["""eval_perplexity"""] , 42 )
    @slow
    def test_run_t5_mlm(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
run_t5_mlm_flax.py
--model_name_or_path t5-small
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
'''.split()
        with patch.object(sys, "argv", testargs):
            run_t5_mlm_flax.main()
            result = get_results(tmp_dir)
self.assertGreaterEqual(result["""eval_accuracy"""] , 0.42 )
    @slow
    def test_run_ner(self):
        epochs = 7 if get_gpu_count() > 1 else 2
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
run_flax_ner.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/conll/sample.json
--validation_file tests/fixtures/tests_samples/conll/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--do_train
--do_eval
--warmup_steps=2
--learning_rate=2e-4
--logging_steps 2 --eval_steps 2
--per_device_train_batch_size=2
--per_device_eval_batch_size=2
--num_train_epochs={epochs}
--seed 7
'''.split()
        with patch.object(sys, "argv", testargs):
            run_flax_ner.main()
            result = get_results(tmp_dir)
self.assertGreaterEqual(result["""eval_accuracy"""] , 0.75 )
self.assertGreaterEqual(result["""eval_f1"""] , 0.3 )
    @slow
    def test_run_qa(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
run_qa.py
--model_name_or_path bert-base-uncased
--version_2_with_negative
--train_file tests/fixtures/tests_samples/SQUAD/sample.json
--validation_file tests/fixtures/tests_samples/SQUAD/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=2
--do_train
--do_eval
--logging_steps 2 --eval_steps 2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
'''.split()
        with patch.object(sys, "argv", testargs):
            run_qa.main()
            result = get_results(tmp_dir)
self.assertGreaterEqual(result["""eval_f1"""] , 30 )
self.assertGreaterEqual(result["""eval_exact"""] , 30 )
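# Each test above patches sys.argv and invokes the example script's main() in-process;
# run the suite with e.g. `RUN_SLOW=1 python -m pytest examples/flax/test_flax_examples.py`
# (path hedged - adjust to wherever this test file lives in the repository).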
| 651
| 0
|
"""simple docstring"""
import json
import os
from typing import Optional
import numpy as np
from ...feature_extraction_utils import BatchFeature
from ...processing_utils import ProcessorMixin
from ...utils import logging
from ...utils.hub import get_file_from_repo
from ..auto import AutoTokenizer
logger = logging.get_logger(__name__)


class BarkProcessor(ProcessorMixin):
    """Processor for Bark-style TTS models: wraps a tokenizer plus optional speaker-embedding voice presets."""

    tokenizer_class = "AutoTokenizer"
    attributes = ["tokenizer"]

    preset_shape = {
        "semantic_prompt": 1,  # 1D array
        "coarse_prompt": 2,  # 2D array
        "fine_prompt": 2,  # 2D array
    }

    def __init__(self, tokenizer, speaker_embeddings=None):
        super().__init__(tokenizer)
        self.speaker_embeddings = speaker_embeddings
    @classmethod
    def from_pretrained(cls, pretrained_processor_name_or_path, speaker_embeddings_dict_path="speaker_embeddings_path.json", **kwargs):
        if speaker_embeddings_dict_path is not None:
            speaker_embeddings_path = get_file_from_repo(
                pretrained_processor_name_or_path,
                speaker_embeddings_dict_path,
                subfolder=kwargs.pop("subfolder", None),
                cache_dir=kwargs.pop("cache_dir", None),
                force_download=kwargs.pop("force_download", False),
                proxies=kwargs.pop("proxies", None),
                resume_download=kwargs.pop("resume_download", False),
                local_files_only=kwargs.pop("local_files_only", False),
                use_auth_token=kwargs.pop("use_auth_token", None),
                revision=kwargs.pop("revision", None),
            )
            if speaker_embeddings_path is None:
                logger.warning(
                    f"""`{os.path.join(pretrained_processor_name_or_path, speaker_embeddings_dict_path)}` does not exist
                    , no preloaded speaker embeddings will be used - Make sure to provide a correct path to the json
                    dictionary if wanted, otherwise set `speaker_embeddings_dict_path=None`."""
                )
                speaker_embeddings = None
            else:
                with open(speaker_embeddings_path) as speaker_embeddings_json:
                    speaker_embeddings = json.load(speaker_embeddings_json)
        else:
            speaker_embeddings = None

        tokenizer = AutoTokenizer.from_pretrained(pretrained_processor_name_or_path, **kwargs)

        return cls(tokenizer=tokenizer, speaker_embeddings=speaker_embeddings)
    def save_pretrained(self, save_directory, speaker_embeddings_dict_path="speaker_embeddings_path.json", speaker_embeddings_directory="speaker_embeddings", push_to_hub=False, **kwargs):
        if self.speaker_embeddings is not None:
            os.makedirs(os.path.join(save_directory, speaker_embeddings_directory, "v2"), exist_ok=True)

            embeddings_dict = {}
            embeddings_dict["repo_or_path"] = save_directory

            for prompt_key in self.speaker_embeddings:
                if prompt_key != "repo_or_path":
                    voice_preset = self._load_voice_preset(prompt_key)

                    tmp_dict = {}
                    for key in self.speaker_embeddings[prompt_key]:
                        np.save(
                            os.path.join(
                                embeddings_dict["repo_or_path"], speaker_embeddings_directory, f"{prompt_key}_{key}"
                            ),
                            voice_preset[key],
                            allow_pickle=False,
                        )
                        tmp_dict[key] = os.path.join(speaker_embeddings_directory, f"{prompt_key}_{key}.npy")

                    embeddings_dict[prompt_key] = tmp_dict

            with open(os.path.join(save_directory, speaker_embeddings_dict_path), "w") as fp:
                json.dump(embeddings_dict, fp)

        super().save_pretrained(save_directory, push_to_hub, **kwargs)
    def _load_voice_preset(self, voice_preset=None, **kwargs):
        voice_preset_paths = self.speaker_embeddings[voice_preset]

        voice_preset_dict = {}
        for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
            if key not in voice_preset_paths:
                raise ValueError(
                    f"Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}]."
                )

            path = get_file_from_repo(
                self.speaker_embeddings.get("repo_or_path", "/"),
                voice_preset_paths[key],
                subfolder=kwargs.pop("subfolder", None),
                cache_dir=kwargs.pop("cache_dir", None),
                force_download=kwargs.pop("force_download", False),
                proxies=kwargs.pop("proxies", None),
                resume_download=kwargs.pop("resume_download", False),
                local_files_only=kwargs.pop("local_files_only", False),
                use_auth_token=kwargs.pop("use_auth_token", None),
                revision=kwargs.pop("revision", None),
            )
            if path is None:
                raise ValueError(
                    f"""`{os.path.join(self.speaker_embeddings.get("repo_or_path", "/"), voice_preset_paths[key])}` does not exist
                    , no preloaded voice preset will be used - Make sure to provide correct paths to the {voice_preset}
                    embeddings."""
                )

            voice_preset_dict[key] = np.load(path)

        return voice_preset_dict
    def _validate_voice_preset_dict(self, voice_preset=None):
        for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
            if key not in voice_preset:
                raise ValueError(f"Voice preset unrecognized, missing {key} as a key.")

            if not isinstance(voice_preset[key], np.ndarray):
                raise ValueError(f"{key} voice preset must be a {str(self.preset_shape[key])}D ndarray.")

            if len(voice_preset[key].shape) != self.preset_shape[key]:
                raise ValueError(f"{key} voice preset must be a {str(self.preset_shape[key])}D ndarray.")
    def __call__(self, text=None, voice_preset=None, return_tensors="pt", max_length=256, add_special_tokens=False, return_attention_mask=True, return_token_type_ids=False, **kwargs):
        if voice_preset is not None and not isinstance(voice_preset, dict):
            if (
                isinstance(voice_preset, str)
                and self.speaker_embeddings is not None
                and voice_preset in self.speaker_embeddings
            ):
                voice_preset = self._load_voice_preset(voice_preset)
            else:
                if isinstance(voice_preset, str) and not voice_preset.endswith(".npz"):
                    voice_preset = voice_preset + ".npz"
                voice_preset = np.load(voice_preset)

        if voice_preset is not None:
            self._validate_voice_preset_dict(voice_preset, **kwargs)
            voice_preset = BatchFeature(data=voice_preset, tensor_type=return_tensors)

        encoded_text = self.tokenizer(
            text,
            return_tensors=return_tensors,
            padding="max_length",
            max_length=max_length,
            return_attention_mask=return_attention_mask,
            return_token_type_ids=return_token_type_ids,
            add_special_tokens=add_special_tokens,
            **kwargs,
        )

        if voice_preset is not None:
            encoded_text["history_prompt"] = voice_preset

        return encoded_text
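# Usage sketch (hedged - checkpoint and preset names are illustrative):
#
#     processor = BarkProcessor.from_pretrained("suno/bark-small")
#     inputs = processor("Hello, my dog is cute", voice_preset="v2/en_speaker_6")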
| 562
|
"""simple docstring"""
import warnings
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class FlavaProcessor(ProcessorMixin):
    r"""Constructs a FLAVA processor which wraps a FLAVA image processor and a BERT tokenizer into a single processor."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "FlavaImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor
    def __call__(self, images=None, text=None, add_special_tokens=True, padding=False, truncation=False, max_length=None, stride=0, pad_to_multiple_of=None, return_image_mask=None, return_codebook_pixels=None, return_token_type_ids=None, return_attention_mask=None, return_overflowing_tokens=False, return_special_tokens_mask=False, return_offsets_mapping=False, return_length=False, verbose=True, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs,
            )
        if images is not None:
            image_features = self.image_processor(
                images, return_image_mask=return_image_mask, return_codebook_pixels=return_codebook_pixels, return_tensors=return_tensors, **kwargs,
            )

        if text is not None and images is not None:
            encoding.update(image_features)
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)
    def batch_decode(self, *args, **kwargs):
        """Forward all arguments to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward all arguments to the tokenizer's decode."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
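# Usage sketch (hedged - checkpoint name illustrative):
#
#     processor = FlavaProcessor.from_pretrained("facebook/flava-full")
#     inputs = processor(images=image, text=["a photo of a cat"], return_tensors="pt")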
| 562
| 1
|
from collections.abc import Generator
from math import sin
def to_little_endian(string_32: bytes) -> bytes:
    """Convert a 32-char string to little-endian byte order."""
    if len(string_32) != 32:
        raise ValueError("Input must be of length 32")

    little_endian = b""
    for i in [3, 2, 1, 0]:
        little_endian += string_32[8 * i : 8 * i + 8]
    return little_endian
def reformat_hex(i: int) -> bytes:
    """Convert a non-negative int to its little-endian hex representation."""
    if i < 0:
        raise ValueError("Input must be non-negative")

    hex_rep = format(i, "08x")[-8:]
    little_endian_hex = b""
    for i in [3, 2, 1, 0]:
        little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode("utf-8")
    return little_endian_hex
def preprocess(message: bytes) -> bytes:
    """Pad the message to a bit string whose length is a multiple of 512, appending its original length."""
    bit_string = b""
    for char in message:
        bit_string += format(char, "08b").encode("utf-8")
    start_len = format(len(bit_string), "064b").encode("utf-8")

    # Pad bit_string to a multiple of 512 chars
    bit_string += b"1"
    while len(bit_string) % 512 != 448:
        bit_string += b"0"
    bit_string += to_little_endian(start_len[32:]) + to_little_endian(start_len[:32])

    return bit_string
def get_block_words(bit_string: bytes) -> Generator[list[int], None, None]:
    """Split the preprocessed bit string into 512-bit blocks of 16 little-endian 32-bit words."""
    if len(bit_string) % 512 != 0:
        raise ValueError("Input must have length that's a multiple of 512")

    for pos in range(0, len(bit_string), 512):
        block = bit_string[pos : pos + 512]
        block_words = []
        for i in range(0, 512, 32):
            block_words.append(int(to_little_endian(block[i : i + 32]), 2))
        yield block_words
def not_32(i: int) -> int:
    """Bitwise NOT on a 32-bit integer."""
    if i < 0:
        raise ValueError("Input must be non-negative")

    i_str = format(i, "032b")
    new_str = ""
    for c in i_str:
        new_str += "1" if c == "0" else "0"
    return int(new_str, 2)
def sum_32(a: int, b: int) -> int:
    """Addition modulo 2**32."""
    return (a + b) % 2**32
def left_rotate_32(i: int, shift: int) -> int:
    """Rotate the bits of a 32-bit integer left by `shift`."""
    if i < 0:
        raise ValueError("Input must be non-negative")
    if shift < 0:
        raise ValueError("Shift must be non-negative")
    return ((i << shift) ^ (i >> (32 - shift))) % 2**32
def md5_me(message: bytes) -> bytes:
    """Return the 32-char little-endian hex MD5 digest of the message."""
    # Convert the message to a padded bit string
    bit_string = preprocess(message)

    added_consts = [int(2**32 * abs(sin(i + 1))) for i in range(64)]

    # Starting states
    a0 = 0x67452301
    b0 = 0xEFCDAB89
    c0 = 0x98BADCFE
    d0 = 0x10325476

    shift_amounts = [
7,
12,
17,
22,
7,
12,
17,
22,
7,
12,
17,
22,
7,
12,
17,
22,
5,
9,
14,
20,
5,
9,
14,
20,
5,
9,
14,
20,
5,
9,
14,
20,
4,
11,
16,
23,
4,
11,
16,
23,
4,
11,
16,
23,
4,
11,
16,
23,
6,
10,
15,
21,
6,
10,
15,
21,
6,
10,
15,
21,
6,
10,
15,
21,
]
# Process bit string in chunks, each with 16 32-char words
    for block_words in get_block_words(bit_string):
        a = a0
        b = b0
        c = c0
        d = d0

        # Hash current chunk
        for i in range(64):
            if i <= 15:
                # f = (b & c) | (not_32(b) & d)  # Alternate definition for f
                f = d ^ (b & (c ^ d))
                g = i
            elif i <= 31:
                # f = (d & b) | (not_32(d) & c)  # Alternate definition for f
                f = c ^ (d & (b ^ c))
                g = (5 * i + 1) % 16
            elif i <= 47:
                f = b ^ c ^ d
                g = (3 * i + 5) % 16
            else:
                f = c ^ (b | not_32(d))
                g = (7 * i) % 16
            f = (f + a + added_consts[i] + block_words[g]) % 2**32
            a = d
            d = c
            c = b
            b = sum_32(b, left_rotate_32(f, shift_amounts[i]))

        # Add hashed chunk to running total
        a0 = sum_32(a0, a)
        b0 = sum_32(b0, b)
        c0 = sum_32(c0, c)
        d0 = sum_32(d0, d)

    digest = reformat_hex(a0) + reformat_hex(b0) + reformat_hex(c0) + reformat_hex(d0)
    return digest
if __name__ == "__main__":
import doctest
doctest.testmod()
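    # Quick sanity check against a well-known reference digest (assumes the
    # reconstructed helper names above are correct):
    print(md5_me(b"hello world"))  # expected: b'5eb63bbbe01eeed093cb22bb8f5acdc3'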
| 563
|
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import (
BitConfig,
ViTHybridConfig,
ViTHybridForImageClassification,
ViTHybridImageProcessor,
ViTHybridModel,
)
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    rename_keys = []
# fmt: off
# stem:
rename_keys.append(("cls_token", "vit.embeddings.cls_token") )
rename_keys.append(("pos_embed", "vit.embeddings.position_embeddings") )
rename_keys.append(("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight") )
rename_keys.append(("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias") )
# backbone
rename_keys.append(("patch_embed.backbone.stem.conv.weight", "vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight") )
rename_keys.append(("patch_embed.backbone.stem.norm.weight", "vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight") )
rename_keys.append(("patch_embed.backbone.stem.norm.bias", "vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias") )
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias''') )
# transformer encoder
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'''blocks.{i}.norm1.weight''', f'''vit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((f'''blocks.{i}.norm1.bias''', f'''vit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append((f'''blocks.{i}.attn.proj.weight''', f'''vit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((f'''blocks.{i}.attn.proj.bias''', f'''vit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((f'''blocks.{i}.norm2.weight''', f'''vit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((f'''blocks.{i}.norm2.bias''', f'''vit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((f'''blocks.{i}.mlp.fc1.weight''', f'''vit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((f'''blocks.{i}.mlp.fc1.bias''', f'''vit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((f'''blocks.{i}.mlp.fc2.weight''', f'''vit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((f'''blocks.{i}.mlp.fc2.bias''', f'''vit.encoder.layer.{i}.output.dense.bias''') )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
("pre_logits.fc.weight", "pooler.dense.weight"),
("pre_logits.fc.bias", "pooler.dense.bias"),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("norm.weight", "vit.layernorm.weight"),
("norm.bias", "vit.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
# fmt: on
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def prepare_img():
    # We will verify our results on a COCO validation image
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_vit_checkpoint(vit_name, pytorch_dump_folder_path, push_to_hub=False):
    """Copy/paste/tweak a timm hybrid ViT's weights to our ViT structure."""
    # define default ViT hybrid configuration
    backbone_config = BitConfig(
        global_padding="same",
        layer_type="bottleneck",
        depths=(3, 4, 9),
        out_features=["stage3"],
        embedding_dynamic_padding=True,
    )
    config = ViTHybridConfig(backbone_config=backbone_config, image_size=384, num_labels=1000)
    base_model = False

    # load original model from timm
    timm_model = timm.create_model(vit_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    # load HuggingFace model
    if vit_name[-5:] == "in21k":
        model = ViTHybridModel(config).eval()
    else:
        model = ViTHybridForImageClassification(config).eval()
    model.load_state_dict(state_dict)

    # create image processor
    transform = create_transform(**resolve_data_config({}, model=timm_model))
    timm_transforms = transform.transforms

    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }

    processor = ViTHybridImageProcessor(
        do_resize=True,
        size={"shortest_edge": timm_transforms[0].size},
        resample=pillow_resamplings[timm_transforms[0].interpolation.value],
        do_center_crop=True,
        crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]},
        do_normalize=True,
        image_mean=timm_transforms[-1].mean.tolist(),
        image_std=timm_transforms[-1].std.tolist(),
    )

    image = prepare_img()
    timm_pixel_values = transform(image).unsqueeze(0)
    pixel_values = processor(image, return_tensors="pt").pixel_values

    # verify pixel values
    assert torch.allclose(timm_pixel_values, pixel_values)

    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values)
        logits = outputs.logits

    print("Predicted class:", logits.argmax(-1).item())
    if base_model:
        timm_pooled_output = timm_model.forward_features(pixel_values)
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(timm_pooled_output, outputs.pooler_output, atol=1e-3)
    else:
        timm_logits = timm_model(pixel_values)
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model {vit_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor to the hub {vit_name}")
        model.push_to_hub(f"ybelkada/{vit_name}")
        processor.push_to_hub(f"ybelkada/{vit_name}")
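# Example invocation (script name hedged - use whatever this file is called locally):
#
#     python convert_vit_hybrid_timm_to_pytorch.py \
#         --vit_name vit_base_r50_s16_384 --pytorch_dump_folder_path ./vit-hybrid-base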
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--vit_name""",
default="""vit_base_r50_s16_384""",
type=str,
help="""Name of the hybrid ViT timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether to upload the model to the HuggingFace hub."""
)
    args = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 563
| 1
|
import os
import tempfile
import unittest
import uuid
from pathlib import Path
from transformers.testing_utils import get_tests_dir, require_soundfile, require_torch, require_vision
from transformers.tools.agent_types import AgentAudio, AgentImage, AgentText
from transformers.utils import is_soundfile_availble, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_soundfile_availble():
import soundfile as sf
if is_vision_available():
from PIL import Image
def get_new_path(suffix="") -> str:
    directory = tempfile.mkdtemp()
    return os.path.join(directory, str(uuid.uuid4()) + suffix)
@require_soundfile
@require_torch
class AgentAudioTests(unittest.TestCase):
    def test_from_tensor(self):
        tensor = torch.rand(12, dtype=torch.float32) - 0.5
        agent_type = AgentAudio(tensor)
        path = str(agent_type.to_string())

        # Ensure that the tensor and the agent_type's tensor are the same
        self.assertTrue(torch.allclose(tensor, agent_type.to_raw(), atol=1e-4))

        del agent_type

        # Ensure the path remains even after the object deletion
        self.assertTrue(os.path.exists(path))

        # Ensure that the file contains the same value as the original tensor
        new_tensor, _ = sf.read(path)
        self.assertTrue(torch.allclose(tensor, torch.tensor(new_tensor), atol=1e-4))
    def test_from_string(self):
        tensor = torch.rand(12, dtype=torch.float32) - 0.5
        path = get_new_path(suffix=".wav")
        sf.write(path, tensor, 16000)
        agent_type = AgentAudio(path)

        self.assertTrue(torch.allclose(tensor, agent_type.to_raw(), atol=1e-4))
        self.assertEqual(agent_type.to_string(), path)
@require_vision
@require_torch
class AgentImageTests(unittest.TestCase):
    def test_from_tensor(self):
        tensor = torch.randint(0, 256, (64, 64, 3))
        agent_type = AgentImage(tensor)
        path = str(agent_type.to_string())

        # Ensure that the tensor and the agent_type's tensor are the same
        self.assertTrue(torch.allclose(tensor, agent_type._tensor, atol=1e-4))

        self.assertIsInstance(agent_type.to_raw(), Image.Image)

        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(path))
    def test_from_string(self):
        path = Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png"
        image = Image.open(path)
        agent_type = AgentImage(path)

        self.assertTrue(path.samefile(agent_type.to_string()))
        self.assertTrue(image == agent_type.to_raw())

        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(path))
    def test_from_image(self):
        path = Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png"
        image = Image.open(path)
        agent_type = AgentImage(image)

        self.assertFalse(path.samefile(agent_type.to_string()))
        self.assertTrue(image == agent_type.to_raw())

        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(path))
class AgentTextTests(unittest.TestCase):
    def test_from_string(self):
        string = "Hey!"
        agent_type = AgentText(string)

        self.assertEqual(string, agent_type.to_string())
        self.assertEqual(string, agent_type.to_raw())
        self.assertEqual(string, agent_type)
| 714
|
import math
import sys
def minimum_squares_to_represent_a_number(number: int) -> int:
    """Count the minimum number of perfect squares that sum to `number` (dynamic programming)."""
    if number != int(number):
        raise ValueError("the value of input must be a natural number")
    if number < 0:
        raise ValueError("the value of input must not be a negative number")
    if number == 0:
        return 1
    answers = [-1] * (number + 1)
    answers[0] = 0
    for i in range(1, number + 1):
        answer = sys.maxsize
        root = int(math.sqrt(i))
        for j in range(1, root + 1):
            current_answer = 1 + answers[i - (j**2)]
            answer = min(answer, current_answer)
        answers[i] = answer
    return answers[number]
if __name__ == "__main__":
import doctest
doctest.testmod()
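    # e.g. 12 = 4 + 4 + 4, and no representation with fewer squares exists
    # (function name above is a reconstruction; nothing else references it):
    print(minimum_squares_to_represent_a_number(12))  # 3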
| 382
| 0
|
from __future__ import annotations
from bisect import bisect_left
from functools import total_ordering
from heapq import merge
@total_ordering
class Stack(list):
    def __lt__(self, other):
        return self[-1] < other[-1]

    def __eq__(self, other):
        return self[-1] == other[-1]


def patience_sort(collection: list) -> list:
    """Sort the collection in place with the patience-sorting technique."""
    stacks: list[Stack] = []
    # sort into stacks
    for element in collection:
        new_stacks = Stack([element])
        i = bisect_left(stacks, new_stacks)
        if i != len(stacks):
            stacks[i].append(element)
        else:
            stacks.append(new_stacks)

    # use a heap-based merge to merge stack efficiently
    collection[:] = merge(*(reversed(stack) for stack in stacks))
    return collection
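# Each pile keeps a non-increasing run: bisect_left compares piles by their top
# card, so a new card only lands on a pile whose top is >= the card. Reversing
# every pile therefore yields sorted runs that heapq.merge combines in O(n log k),
# where k is the number of piles.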
if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item) for item in user_input.split(',')]
print(patience_sort(unsorted))
| 648
|
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import SPIECE_UNDERLINE, is_sentencepiece_available
from transformers.models.speech_to_text import Speech2TextTokenizer
from transformers.models.speech_to_text.tokenization_speech_to_text import VOCAB_FILES_NAMES, save_json
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_SP = get_tests_dir('fixtures/test_sentencepiece.model')
if is_sentencepiece_available():
import sentencepiece as sp
FR_CODE = 5
ES_CODE = 10
@require_sentencepiece
@require_tokenizers
class SpeechToTextTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = Speech2TextTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        spm_model = sp.SentencePieceProcessor()
        spm_model.Load(SAMPLE_SP)
        vocab = ["<s>", "<pad>", "</s>", "<unk>"]

        vocab += [spm_model.IdToPiece(id_) for id_ in range(len(spm_model))]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        save_dir = Path(self.tmpdirname)
        save_json(vocab_tokens, save_dir / VOCAB_FILES_NAMES["vocab_file"])
        if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES["spm_file"])

        tokenizer = Speech2TextTokenizer.from_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "j")
        self.assertEqual(len(vocab_keys), 1001)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1001)
def lowercase ( self : Dict ):
snake_case__ : List[Any] = SpeechaTextTokenizer.from_pretrained(self.tmpdirname )
snake_case__ : Tuple = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(__a , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__a ) , [2_8_9, 5_0, 1_4, 1_7_4, 3_8_6] , )
snake_case__ : Union[str, Any] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
__a , [SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """9""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """é""", """."""] , )
snake_case__ : Union[str, Any] = tokenizer.convert_tokens_to_ids(__a )
self.assertListEqual(__a , [1_2, 2_5, 8_8, 5_9, 2_8, 2_3, 1_1, 4, 6_0_6, 3_5_1, 3_5_1, 3_5_1, 7, 1_6, 7_0, 5_0, 7_6, 8_4, 1_0, 4, 8] )
snake_case__ : int = tokenizer.convert_ids_to_tokens(__a )
self.assertListEqual(
__a , [SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """<unk>""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """<unk>""", """."""] , )
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
snake_case__ : Union[str, Any] = {"""input_ids""": [[3_7_9_1, 7_9_7, 3_1, 1_1, 6_4, 7_9_7, 3_1, 2_4_2_9, 4_3_3, 1_2, 1_1_7_6, 1_2, 2_0, 7_8_6, 9_1_5, 1_4_2, 2_4_1_3, 2_4_0, 3_7, 3_2_3_8, 7_9_7, 3_1, 1_1, 3_5, 9_3, 9_1_5, 1_4_2, 2_4_1_3, 2_4_0, 3_7, 5_5_4_0, 5_6_7, 1_2_7_6, 9_3, 3_7, 6_1_0, 4_0, 6_2, 4_5_5, 6_5_7, 1_0_4_2, 1_2_3, 7_8_0, 1_7_7, 3_7, 3_0_9, 2_4_1, 1_2_9_8, 5_1_4, 2_0, 2_9_2, 2_7_3_7, 1_1_4, 2_4_6_9, 2_4_1, 8_5, 6_4, 3_0_2, 5_4_8, 5_2_8, 4_2_3, 4, 5_0_9, 4_0_6, 4_2_3, 3_7, 6_0_1, 4, 7_7_7, 3_0_2, 5_4_8, 5_2_8, 4_2_3, 2_8_4, 4, 3_3_8_8, 5_1_1, 4_5_9, 4, 3_5_5_5, 4_0, 3_2_1, 3_0_2, 7_0_5, 4, 3_3_8_8, 5_1_1, 5_8_3, 3_2_6, 5, 5, 5, 6_2, 3_3_1_0, 5_6_0, 1_7_7, 2_6_8_0, 2_1_7, 1_5_0_8, 3_2, 3_1, 8_5_3, 4_1_8, 6_4, 5_8_3, 5_1_1, 1_6_0_5, 6_2, 3_5, 9_3, 5_6_0, 1_7_7, 2_6_8_0, 2_1_7, 1_5_0_8, 1_5_2_1, 6_4, 5_8_3, 5_1_1, 5_1_9, 6_2, 2_0, 1_5_1_5, 7_6_4, 2_0, 1_4_9, 2_6_1, 5_6_2_5, 7_9_7_2, 2_0, 5_5_4_0, 5_6_7, 1_2_7_6, 9_3, 3_9_2_5, 1_6_7_5, 1_1, 1_5, 8_0_2, 7_9_7_2, 5_7_6, 2_1_7, 1_5_0_8, 1_1, 3_5, 9_3, 1_2_5_3, 2_4_4_1, 1_5, 2_8_9, 6_5_2, 3_1, 4_1_6, 3_2_1, 3_8_4_2, 1_1_5, 4_0, 9_1_1, 8, 4_7_6, 6_1_9, 4, 3_8_0, 1_4_2, 4_2_3, 3_3_5, 2_4_0, 3_5, 9_3, 2_6_4, 8, 1_1, 3_3_5, 5_6_9, 4_2_0, 1_6_3, 5, 2], [2_6_0, 5_4_8, 5_2_8, 4_2_3, 2_0, 4_5_1, 2_0, 2_6_8_1, 1_1_5_3, 3_4_3_4, 2_0, 5_5_4_0, 3_7, 5_6_7, 1_2_6, 1_2_5_3, 2_4_4_1, 3_3_7_6, 4_4_9, 2_1_0, 4_3_1, 1_5_6_3, 1_7_7, 7_6_7, 5_5_4_0, 1_1, 1_2_0_3, 4_7_2, 1_1, 2_9_5_3, 6_8_5, 2_8_5, 3_6_4, 7_0_6, 1_1_5_3, 2_0, 6_7_9_9, 2_0, 2_8_6_9, 2_0, 4_4_6_4, 1_2_6, 4_0, 2_4_2_9, 2_0, 1_0_4_0, 8_6_6, 2_6_6_4, 4_1_8, 2_0, 3_1_8, 2_0, 1_7_2_6, 1_8_6, 2_0, 2_6_5, 5_2_2, 3_5, 9_3, 2_1_9_1, 4_6_3_4, 2_0, 1_0_4_0, 1_2, 6_7_9_9, 1_5, 2_2_8, 2_3_5_6, 1_4_2, 3_1, 1_1, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [2_5_7_5, 2_6_6_6, 6_8_4, 1_5_8_2, 1_1_7_6, 1_2, 6_2_7, 1_4_9, 6_1_9, 2_0, 4_9_0_2, 5_6_3, 1_1, 2_0, 1_4_9, 2_6_1, 3_4_2_0, 2_3_5_6, 1_7_4, 1_4_2, 4_7_1_4, 1_3_1, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on

        # `snake_case__` is the expected-encoding literal defined above.
        self.tokenizer_integration_test_util(
            expected_encoding=snake_case__,
            model_name="facebook/s2t-small-mustc-en-de-st",
            revision="a14f04cf0776c02f62a8cb800cf7909e15ea23ad",
        )
@require_sentencepiece
class SpeechToTextTokenizerMultilinguialTest(unittest.TestCase):
    checkpoint_name = "valhalla/s2t_mustc_multilinguial_medium"

    french_text = "C'est trop cool"
    spanish_text = "Esto es genial"

    @classmethod
    def setUpClass(cls):
        cls.tokenizer: Speech2TextTokenizer = Speech2TextTokenizer.from_pretrained(cls.checkpoint_name)
        return cls

    def test_language_codes(self):
        self.assertEqual(self.tokenizer.lang_code_to_id["pt"], 4)
        self.assertEqual(self.tokenizer.lang_code_to_id["ru"], 6)
        self.assertEqual(self.tokenizer.lang_code_to_id["it"], 9)
        self.assertEqual(self.tokenizer.lang_code_to_id["de"], 11)

    def test_vocab_size(self):
        self.assertEqual(self.tokenizer.vocab_size, 10000)

    def test_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(ES_CODE, self.tokenizer.all_special_ids)
        generated_ids = [ES_CODE, 4, 1601, 47, 7647, 2]
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_spanish = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_spanish)
        self.assertNotIn(self.tokenizer.eos_token, result)

    def test_tokenizer_adds_special_tokens(self):
        self.tokenizer.tgt_lang = "fr"
        encoded = self.tokenizer(self.french_text).input_ids
        self.assertEqual(encoded[0], FR_CODE)
        self.assertEqual(encoded[-1], self.tokenizer.eos_token_id)

    def test_tgt_lang_setter(self):
        self.tokenizer.tgt_lang = "fr"
        self.assertListEqual(self.tokenizer.prefix_tokens, [FR_CODE])

        self.tokenizer.tgt_lang = "es"
        self.assertListEqual(self.tokenizer.prefix_tokens, [ES_CODE])
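

# Illustrative usage (a sketch, not part of the test suite; the checkpoint is the one
# exercised above): switching `tgt_lang` only changes the prepended language code, so
# two encodings of the same sentence differ in their first token id.
#
#     tokenizer = Speech2TextTokenizer.from_pretrained("valhalla/s2t_mustc_multilinguial_medium")
#     tokenizer.tgt_lang = "fr"
#     fr_ids = tokenizer("C'est trop cool").input_ids   # fr_ids[0] == FR_CODE == 5
#     tokenizer.tgt_lang = "es"
#     es_ids = tokenizer("C'est trop cool").input_ids   # es_ids[0] == ES_CODE == 10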
"""simple docstring"""
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import numpy as np
import pandas as pd
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
BartForSequenceClassification,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
TapexTokenizer,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.17.0.dev0''')
require_version('''datasets>=1.8.0''', '''To fix: pip install -r examples/pytorch/text-classification/requirements.txt''')
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval."""

    dataset_name: Optional[str] = field(
        default="tab_fact", metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default="tab_fact",
        metadata={"help": "The configuration name of the dataset to use (via the datasets library)."},
    )
    max_seq_length: int = field(
        default=1024,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    max_predict_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of prediction examples to this "
                "value if set."
            )
        },
    )
    train_file: Optional[str] = field(
        default=None, metadata={"help": "A csv or a json file containing the training data."}
    )
    validation_file: Optional[str] = field(
        default=None, metadata={"help": "A csv or a json file containing the validation data."}
    )
    test_file: Optional[str] = field(default=None, metadata={"help": "A csv or a json file containing the test data."})

    def __post_init__(self):
        if self.dataset_name is not None:
            pass
        elif self.train_file is None or self.validation_file is None:
            raise ValueError("Need either a GLUE task, a training/validation file or a dataset name.")
        else:
            train_extension = self.train_file.split(".")[-1]
            assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file."
            validation_extension = self.validation_file.split(".")[-1]
            assert (
                validation_extension == train_extension
            ), "`validation_file` should have the same extension (csv or json) as `train_file`."
@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/tokenizer we are going to fine-tune from."""

    model_name_or_path: str = field(
        default=None, metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
    # or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
    #
    # For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table.
    #
    # If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
    # single column. You can easily tweak this behavior (see below)
    #
    # In distributed training, the load_dataset function guarantees that only one local process can concurrently
    # download the dataset.
    if data_args.dataset_name is not None:
        # Downloading and loading a dataset from the hub.
        raw_datasets = load_dataset(
            data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir
        )
    else:
        # Loading a dataset from your local files.
        # CSV/JSON training and evaluation files are needed.
        data_files = {"train": data_args.train_file, "validation": data_args.validation_file}

        # Get the test dataset: you can provide your own CSV/JSON test file (see below)
        # when you use `do_predict` without specifying a GLUE benchmark task.
        if training_args.do_predict:
            if data_args.test_file is not None:
                train_extension = data_args.train_file.split(".")[-1]
                test_extension = data_args.test_file.split(".")[-1]
                assert (
                    test_extension == train_extension
                ), "`test_file` should have the same extension (csv or json) as `train_file`."
                data_files["test"] = data_args.test_file
            else:
                raise ValueError("Need either a GLUE task or a test file for `do_predict`.")

        for key in data_files.keys():
            logger.info(f"load a local file for {key}: {data_files[key]}")

        if data_args.train_file.endswith(".csv"):
            # Loading a dataset from local csv files
            raw_datasets = load_dataset("csv", data_files=data_files, cache_dir=model_args.cache_dir)
        else:
            # Loading a dataset from local json files
            raw_datasets = load_dataset("json", data_files=data_files, cache_dir=model_args.cache_dir)
    # See more about loading any type of standard or custom dataset at
    # https://huggingface.co/docs/datasets/loading_datasets.html.

    # Labels
    label_list = raw_datasets["train"].features["label"].names
    num_labels = len(label_list)

    # Load pretrained model and tokenizer
    #
    # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    # load tapex tokenizer
    tokenizer = TapexTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast_tokenizer,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
        add_prefix_space=True,
    )
    model = BartForSequenceClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # Padding strategy
    if data_args.pad_to_max_length:
        padding = "max_length"
    else:
        # We will pad later, dynamically at batch creation, to the max sequence length in each batch
        padding = False

    # Some models have set the order of the labels to use, so let's make sure we do use it.
    model.config.label2id = {"Refused": 0, "Entailed": 1}
    model.config.id2label = {0: "Refused", 1: "Entailed"}

    if data_args.max_seq_length > tokenizer.model_max_length:
        logger.warning(
            f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the "
            f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
        )
    max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)

    def preprocess_tabfact_function(examples):
        # Tokenize the texts
        def _convert_table_text_to_pandas(_table_text):
            """Convert a '#'-delimited table string (rows split on newlines) into a pandas DataFrame."""
            _table_content = [_table_row.split("#") for _table_row in _table_text.strip("\n").split("\n")]
            _table_pd = pd.DataFrame.from_records(_table_content[1:], columns=_table_content[0])
            return _table_pd

        questions = examples["statement"]
        tables = list(map(_convert_table_text_to_pandas, examples["table_text"]))
        result = tokenizer(tables, questions, padding=padding, max_length=max_seq_length, truncation=True)
        result["label"] = examples["label"]
        return result
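
    # For illustration (a sketch with made-up cell values, not part of the training
    # flow): a TabFact `table_text` entry is newline-separated rows whose cells are
    # joined by "#", with the first row holding the column names, e.g.
    #
    #     "city#population\nparis#2.1m\nmadrid#3.2m"
    #
    # which _convert_table_text_to_pandas turns into
    #
    #     pd.DataFrame.from_records([["paris", "2.1m"], ["madrid", "3.2m"]],
    #                               columns=["city", "population"])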
    with training_args.main_process_first(desc="dataset map pre-processing"):
        raw_datasets = raw_datasets.map(
            preprocess_tabfact_function,
            batched=True,
            load_from_cache_file=not data_args.overwrite_cache,
            desc="Running tokenizer on dataset",
        )
    if training_args.do_train:
        if "train" not in raw_datasets:
            raise ValueError("--do_train requires a train dataset")
        train_dataset = raw_datasets["train"]
        if data_args.max_train_samples is not None:
            train_dataset = train_dataset.select(range(data_args.max_train_samples))

    if training_args.do_eval:
        if "validation" not in raw_datasets and "validation_matched" not in raw_datasets:
            raise ValueError("--do_eval requires a validation dataset")
        eval_dataset = raw_datasets["validation"]
        if data_args.max_eval_samples is not None:
            eval_dataset = eval_dataset.select(range(data_args.max_eval_samples))

    if training_args.do_predict or data_args.test_file is not None:
        if "test" not in raw_datasets and "test_matched" not in raw_datasets:
            raise ValueError("--do_predict requires a test dataset")
        predict_dataset = raw_datasets["test"]
        if data_args.max_predict_samples is not None:
            predict_dataset = predict_dataset.select(range(data_args.max_predict_samples))

    # Log a few random samples from the training set:
    if training_args.do_train:
        for index in random.sample(range(len(train_dataset)), 3):
            logger.info(f"Sample {index} of the training set: {train_dataset[index]}.")

    # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
    # predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p: EvalPrediction):
        preds = p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions
        preds = np.argmax(preds, axis=1)
        return {"accuracy": (preds == p.label_ids).astype(np.float32).mean().item()}
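
    # A worked example of the accuracy computation above (a sketch; the logits are
    # made up): with predictions [[0.2, 0.8], [0.9, 0.1]] and label_ids [1, 1],
    # np.argmax(..., axis=1) gives [1, 0], so (preds == label_ids) is [True, False]
    # and the reported accuracy is 0.5.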
    # Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
    if data_args.pad_to_max_length:
        data_collator = default_data_collator
    elif training_args.fp16:
        data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8)
    else:
        data_collator = None

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset if training_args.do_train else None,
        eval_dataset=eval_dataset if training_args.do_eval else None,
        compute_metrics=compute_metrics,
        tokenizer=tokenizer,
        data_collator=data_collator,
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))

        trainer.save_model()  # Saves the tokenizer too for easy upload

        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        metrics = trainer.evaluate(eval_dataset=eval_dataset)
        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))

        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    if training_args.do_predict:
        logger.info("*** Predict ***")
        # Removing the `label` columns because it contains -1 and Trainer won't like that.
        predict_dataset = predict_dataset.remove_columns("label")
        predictions = trainer.predict(predict_dataset, metric_key_prefix="predict").predictions
        predictions = np.argmax(predictions, axis=1)

        output_predict_file = os.path.join(training_args.output_dir, "predict_results_tabfact.txt")
        if trainer.is_world_process_zero():
            with open(output_predict_file, "w") as writer:
                logger.info("***** Predict Results *****")
                writer.write("index\tprediction\n")
                for index, item in enumerate(predictions):
                    item = label_list[item]
                    writer.write(f"{index}\t{item}\n")

    kwargs = {"finetuned_from": model_args.model_name_or_path, "tasks": "text-classification"}
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
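

# Example invocation (a sketch; the model name and hyper-parameters are illustrative,
# not prescribed by this script):
#
#     python run_tabfact.py \
#         --model_name_or_path microsoft/tapex-base \
#         --dataset_name tab_fact \
#         --do_train --do_eval \
#         --max_seq_length 1024 \
#         --output_dir ./tapex-tabfact \
#         --overwrite_output_dir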
"""simple docstring"""
import copy
from collections import OrderedDict
from typing import Dict, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/detr-resnet-50": "https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json",
    # See all DETR models at https://huggingface.co/models?filter=detr
}
class DetrConfig(PretrainedConfig):
    model_type = "detr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=100,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
            # set timm attributes to None
            dilation, backbone, use_pretrained_backbone = None, None, None

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    @classmethod
    def from_backbone_config(cls, backbone_config: PretrainedConfig, **kwargs):
        """Instantiate a `DetrConfig` from a pre-trained backbone model configuration."""
        return cls(backbone_config=backbone_config, **kwargs)

    def to_dict(self):
        """Serialize this instance to a Python dictionary, nesting the backbone config."""
        output = copy.deepcopy(self.__dict__)
        if output["backbone_config"] is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
class DetrOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
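

# Illustrative usage (a sketch, not part of this module): `attribute_map` lets the
# generic config names resolve to DETR-specific fields, so the accesses below agree.
#
#     config = DetrConfig(d_model=256, encoder_attention_heads=8)
#     assert config.hidden_size == config.d_model
#     assert config.num_attention_heads == config.encoder_attention_heads
#     nested = config.to_dict()   # the backbone config is serialized as a nested dict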
"""K-fold cross-validation training example built on Accelerate; see the overview comments below."""
import argparse
from typing import List
import evaluate
import numpy as np
import torch
from datasets import DatasetDict, load_dataset
# New Code #
# We'll be using StratifiedKFold for this example
from sklearn.model_selection import StratifiedKFold
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to perform Cross Validation,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_fold_dataloaders(
    accelerator: Accelerator, dataset: DatasetDict, train_idxs: List[int], valid_idxs: List[int], batch_size: int = 16
):
    """Creates the train/validation/test `DataLoader`s for one cross-validation fold."""
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = DatasetDict(
        {
            "train": dataset["train"].select(train_idxs),
            "validation": dataset["train"].select(valid_idxs),
            "test": dataset["validation"],
        }
    )

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )
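
    # A quick numeric illustration of `pad_to_multiple_of` (a sketch, not executed
    # here): if the longest sequence in a batch has 57 tokens and mixed precision is
    # fp16, padding rounds the batch up to 64 tokens (the next multiple of 8), which
    # keeps tensor shapes friendly to Tensor Cores.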
    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )
    test_dataloader = DataLoader(
        tokenized_datasets["test"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader, test_dataloader
def training_function(config, args):
    # New Code #
    test_predictions = []
    # Download the dataset
    datasets = load_dataset("glue", "mrpc")
    # Create our splits
    kfold = StratifiedKFold(n_splits=int(args.num_folds))
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed)

    # New Code #
    # Create our folds:
    folds = kfold.split(np.zeros(datasets["train"].num_rows), datasets["train"]["label"])
    test_references = []
    # Iterate over them
    for i, (train_idxs, valid_idxs) in enumerate(folds):
        train_dataloader, eval_dataloader, test_dataloader = get_fold_dataloaders(
            accelerator,
            datasets,
            train_idxs,
            valid_idxs,
        )
        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device)

        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr)

        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=100,
            num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
        )

        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
        )

        # Now we train the model
        for epoch in range(num_epochs):
            model.train()
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                outputs = model(**batch)
                loss = outputs.loss
                loss = loss / gradient_accumulation_steps
                accelerator.backward(loss)
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()

            model.eval()
            for step, batch in enumerate(eval_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                with torch.no_grad():
                    outputs = model(**batch)
                predictions = outputs.logits.argmax(dim=-1)
                predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
                metric.add_batch(
                    predictions=predictions,
                    references=references,
                )

            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f"epoch {epoch}:", eval_metric)

        # New Code #
        # We also run predictions on the test set at the very end
        fold_predictions = []
        for step, batch in enumerate(test_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            fold_predictions.append(predictions.cpu())
            if i == 0:
                # We need all of the test predictions
                test_references.append(references.cpu())

        # Use accelerator.print to print only on the main process.
        test_predictions.append(torch.cat(fold_predictions, dim=0))
    # We now need to release all our memory and get rid of the current model, optimizer, etc
    accelerator.free_memory()
    # New Code #
    # Finally we check the accuracy of our folded results:
    test_references = torch.cat(test_references, dim=0)
    preds = torch.stack(test_predictions, dim=0).sum(dim=0).div(int(args.num_folds)).argmax(dim=-1)
    test_metric = metric.compute(predictions=preds, references=test_references)
    accelerator.print("Average test metrics from all folds:", test_metric)
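

# How the fold ensembling above behaves (a sketch with made-up logits): with 3 folds
# whose logits for one example are [2.0, 1.0], [0.5, 1.5] and [1.0, 2.0], stacking and
# summing gives [3.5, 4.5]; dividing by the fold count yields the averaged logits
# [1.17, 1.5], and argmax picks class 1. This is soft voting over the per-fold models.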
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    # New Code #
    parser.add_argument("--num_folds", type=int, default=3, help="The number of splits to perform across the dataset")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
"""simple docstring"""
_A = 256
# Modulus to hash a string
_A = 1_000_003
def lowercase (_snake_case ,_snake_case ) -> bool:
'''simple docstring'''
__UpperCamelCase = len(_snake_case )
__UpperCamelCase = len(_snake_case )
if p_len > t_len:
return False
__UpperCamelCase = 0
__UpperCamelCase = 0
__UpperCamelCase = 1
# Calculating the hash of pattern and substring of text
for i in range(_snake_case ):
__UpperCamelCase = (ord(pattern[i] ) + p_hash * alphabet_size) % modulus
__UpperCamelCase = (ord(text[i] ) + text_hash * alphabet_size) % modulus
if i == p_len - 1:
continue
__UpperCamelCase = (modulus_power * alphabet_size) % modulus
for i in range(0 ,t_len - p_len + 1 ):
if text_hash == p_hash and text[i : i + p_len] == pattern:
return True
if i == t_len - p_len:
continue
# Calculate the https://en.wikipedia.org/wiki/Rolling_hash
__UpperCamelCase = (
(text_hash - ord(text[i] ) * modulus_power) * alphabet_size
+ ord(text[i + p_len] )
) % modulus
return False
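
# A worked example of the rolling update above (a sketch with tiny numbers, ignoring
# the modulus): for a pattern of length 3, modulus_power is alphabet_size**2. Sliding
# the window from "abc" to "bcd" takes one step:
#
#     h("abc") = ord("a")*256**2 + ord("b")*256 + ord("c")
#     h("bcd") = (h("abc") - ord("a")*256**2) * 256 + ord("d")
#
# i.e. drop the leading character's contribution, shift the rest up one place, and
# append the new trailing character.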
def test_rabin_karp() -> None:
    # Test 1)
    pattern = "abc1abc12"
    text1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text2 = "alskfjaldsk23adsfabcabc"
    assert rabin_karp(pattern, text1) and not rabin_karp(pattern, text2)

    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert rabin_karp(pattern, text)

    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert rabin_karp(pattern, text)

    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert rabin_karp(pattern, text)

    # Test 5)
    pattern = "Lü"
    text = "Lüsai"
    assert rabin_karp(pattern, text)
    pattern = "Lue"
    assert not rabin_karp(pattern, text)

    print("Success.")


if __name__ == "__main__":
    test_rabin_karp()
"""simple docstring"""
import json
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from transformers import OneFormerImageProcessor
from transformers.models.oneformer.image_processing_oneformer import binary_mask_to_rle
from transformers.models.oneformer.modeling_oneformer import OneFormerForUniversalSegmentationOutput
if is_vision_available():
from PIL import Image
def prepare_metadata(class_info_file, repo_path="shi-labs/oneformer_demo"):
    with open(hf_hub_download(repo_path, class_info_file, repo_type="dataset"), "r") as f:
        class_info = json.load(f)
    metadata = {}
    class_names = []
    thing_ids = []
    for key, info in class_info.items():
        metadata[key] = info["name"]
        class_names.append(info["name"])
        if info["isthing"]:
            thing_ids.append(int(key))
    metadata["thing_ids"] = thing_ids
    metadata["class_names"] = class_names
    return metadata
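
# For orientation (a sketch; the real file on the hub is much larger): an entry in
# ade20k_panoptic.json maps a class id to its name and a thing/stuff flag, e.g.
#
#     {"0": {"name": "wall", "isthing": 0}, "7": {"name": "bed", "isthing": 1}, ...}
#
# so prepare_metadata collects the id -> name pairs plus the "thing_ids" and
# "class_names" lists consumed by the image processor.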
class OneFormerImageProcessorTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        size=None,
        do_resize=True,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        num_labels=10,
        do_reduce_labels=False,
        ignore_index=255,
        repo_path="shi-labs/oneformer_demo",
        class_info_file="ade20k_panoptic.json",
        num_text=10,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = {"shortest_edge": 32, "longest_edge": 1333} if size is None else size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.class_info_file = class_info_file
        self.metadata = prepare_metadata(class_info_file, repo_path)
        self.num_text = num_text
        self.repo_path = repo_path

        # for the post_process_functions
        self.batch_size = 2
        self.num_queries = 10
        self.num_classes = 10
        self.height = 3
        self.width = 4
        self.num_labels = num_labels
        self.do_reduce_labels = do_reduce_labels
        self.ignore_index = ignore_index

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "num_labels": self.num_labels,
            "do_reduce_labels": self.do_reduce_labels,
            "ignore_index": self.ignore_index,
            "class_info_file": self.class_info_file,
            "metadata": self.metadata,
            "num_text": self.num_text,
        }

    def get_expected_values(self, image_inputs, batched=False):
        """
        Computes the expected height and width when providing images to the image
        processor, assuming do_resize is set to True with a scalar size.
        """
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width

    def get_fake_oneformer_outputs(self):
        return OneFormerForUniversalSegmentationOutput(
            # +1 for null class
            class_queries_logits=torch.randn((self.batch_size, self.num_queries, self.num_classes + 1)),
            masks_queries_logits=torch.randn((self.batch_size, self.num_queries, self.height, self.width)),
        )
@require_torch
@require_vision
class OneFormerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = OneFormerImageProcessor if (is_vision_available() and is_torch_available()) else None
    # only for test_image_processing_common.test_image_proc_to_json_string
    feature_extraction_class = image_processing_class

    def setUp(self):
        self.image_processing_tester = OneFormerImageProcessorTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processing_tester.prepare_image_processor_dict()

    def test_image_proc_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "image_mean"))
        self.assertTrue(hasattr(image_processor, "image_std"))
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_resize"))
        self.assertTrue(hasattr(image_processor, "size"))
        self.assertTrue(hasattr(image_processor, "ignore_index"))
        self.assertTrue(hasattr(image_processor, "class_info_file"))
        self.assertTrue(hasattr(image_processor, "num_text"))
        self.assertTrue(hasattr(image_processor, "repo_path"))
        self.assertTrue(hasattr(image_processor, "metadata"))
        self.assertTrue(hasattr(image_processor, "do_reduce_labels"))

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processing_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], ["semantic"], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processing_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs, batched=True)

        encoded_images = image_processor(
            image_inputs, ["semantic"] * len(image_inputs), return_tensors="pt"
        ).pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processing_tester.batch_size,
                self.image_processing_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processing_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], ["semantic"], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processing_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs, batched=True)

        encoded_images = image_processor(
            image_inputs, ["semantic"] * len(image_inputs), return_tensors="pt"
        ).pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processing_tester.batch_size,
                self.image_processing_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processing_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], ["semantic"], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processing_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs, batched=True)

        encoded_images = image_processor(
            image_inputs, ["semantic"] * len(image_inputs), return_tensors="pt"
        ).pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processing_tester.batch_size,
                self.image_processing_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def comm_get_image_processor_inputs(
        self, with_segmentation_maps=False, is_instance_map=False, segmentation_type="np"
    ):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # prepare image and target
        num_labels = self.image_processing_tester.num_labels
        annotations = None
        instance_id_to_semantic_id = None
        image_inputs = prepare_image_inputs(self.image_processing_tester, equal_resolution=False)
        if with_segmentation_maps:
            high = num_labels
            if is_instance_map:
                labels_expanded = list(range(num_labels)) * 2
                instance_id_to_semantic_id = dict(enumerate(labels_expanded))
            annotations = [
                np.random.randint(0, high * 2, (img.size[1], img.size[0])).astype(np.uint8) for img in image_inputs
            ]
            if segmentation_type == "pil":
                annotations = [Image.fromarray(annotation) for annotation in annotations]

        inputs = image_processor(
            image_inputs,
            ["semantic"] * len(image_inputs),
            annotations,
            return_tensors="pt",
            instance_id_to_semantic_id=instance_id_to_semantic_id,
            pad_and_return_pixel_mask=True,
        )

        return inputs

    def test_init_without_params(self):
        pass

    def test_call_with_segmentation_maps(self):
        def common(is_instance_map=False, segmentation_type=None):
            inputs = self.comm_get_image_processor_inputs(
                with_segmentation_maps=True, is_instance_map=is_instance_map, segmentation_type=segmentation_type
            )

            mask_labels = inputs["mask_labels"]
            class_labels = inputs["class_labels"]
            pixel_values = inputs["pixel_values"]
            text_inputs = inputs["text_inputs"]

            # check the batch_size
            for mask_label, class_label, text_input in zip(mask_labels, class_labels, text_inputs):
                self.assertEqual(mask_label.shape[0], class_label.shape[0])
                # this ensure padding has happened
                self.assertEqual(mask_label.shape[1:], pixel_values.shape[2:])
                self.assertEqual(len(text_input), self.image_processing_tester.num_text)

        common()
        common(is_instance_map=True)
        common(is_instance_map=False, segmentation_type="pil")
        common(is_instance_map=True, segmentation_type="pil")

    def test_binary_mask_to_rle(self):
        fake_binary_mask = np.zeros((20, 50))
        fake_binary_mask[0, 20:] = 1  # first row
        fake_binary_mask[1, :15] = 1  # second row
        fake_binary_mask[5, :10] = 1  # sixth row

        rle = binary_mask_to_rle(fake_binary_mask)
        self.assertEqual(len(rle), 4)
        self.assertEqual(rle[0], 21)
        self.assertEqual(rle[1], 45)
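
    # Reading the expected RLE above (a sketch, assuming the row-major flattening used
    # by binary_mask_to_rle): the encoding is (1-indexed start, run length) pairs for
    # runs of ones. Row 0 contributes 30 ones starting at pixel 21, and row 1's first
    # 15 pixels continue that same run, so the first pair is (21, 45); row 5 opens a
    # second run, giving four numbers in total.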
    def test_post_process_semantic_segmentation(self):
        image_processor = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes,
            max_seq_length=77,
            task_seq_length=77,
            class_info_file="ade20k_panoptic.json",
            num_text=self.image_processing_tester.num_text,
            repo_path="shi-labs/oneformer_demo",
        )
        outputs = self.image_processing_tester.get_fake_oneformer_outputs()

        segmentation = image_processor.post_process_semantic_segmentation(outputs)

        self.assertEqual(len(segmentation), self.image_processing_tester.batch_size)
        self.assertEqual(
            segmentation[0].shape,
            (
                self.image_processing_tester.height,
                self.image_processing_tester.width,
            ),
        )

        target_sizes = [(1, 4) for i in range(self.image_processing_tester.batch_size)]
        segmentation = image_processor.post_process_semantic_segmentation(outputs, target_sizes=target_sizes)

        self.assertEqual(segmentation[0].shape, target_sizes[0])

    def test_post_process_instance_segmentation(self):
        image_processor = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes,
            max_seq_length=77,
            task_seq_length=77,
            class_info_file="ade20k_panoptic.json",
            num_text=self.image_processing_tester.num_text,
            repo_path="shi-labs/oneformer_demo",
        )
        outputs = self.image_processing_tester.get_fake_oneformer_outputs()
        segmentation = image_processor.post_process_instance_segmentation(outputs, threshold=0)

        self.assertTrue(len(segmentation) == self.image_processing_tester.batch_size)
        for el in segmentation:
            self.assertTrue("segmentation" in el)
            self.assertTrue("segments_info" in el)
            self.assertEqual(type(el["segments_info"]), list)
            self.assertEqual(
                el["segmentation"].shape, (self.image_processing_tester.height, self.image_processing_tester.width)
            )

    def test_post_process_panoptic_segmentation(self):
        image_processor = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes,
            max_seq_length=77,
            task_seq_length=77,
            class_info_file="ade20k_panoptic.json",
            num_text=self.image_processing_tester.num_text,
            repo_path="shi-labs/oneformer_demo",
        )
        outputs = self.image_processing_tester.get_fake_oneformer_outputs()
        segmentation = image_processor.post_process_panoptic_segmentation(outputs, threshold=0)

        self.assertTrue(len(segmentation) == self.image_processing_tester.batch_size)
        for el in segmentation:
            self.assertTrue("segmentation" in el)
            self.assertTrue("segments_info" in el)
            self.assertEqual(type(el["segments_info"]), list)
            self.assertEqual(
                el["segmentation"].shape, (self.image_processing_tester.height, self.image_processing_tester.width)
            )
"""simple docstring"""
import time
import warnings
from abc import ABC
from copy import deepcopy
from typing import Optional
import torch
from ..utils import add_start_docstrings, logging
snake_case = logging.get_logger(__name__)
snake_case = R'\n Args:\n input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n scores (`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`):\n Prediction scores of a language modeling head. These can be scores for each vocabulary token before SoftMax\n or scores for each vocabulary token after SoftMax.\n kwargs (`Dict[str, Any]`, *optional*):\n Additional stopping criteria specific kwargs.\n\n Return:\n `bool`. `False` indicates we should continue, `True` indicates we should stop.\n\n'
class StoppingCriteria(ABC):
    """Abstract base class for all stopping criteria that can be applied during generation."""

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        raise NotImplementedError('StoppingCriteria needs to be subclassed')


class MaxLengthCriteria(StoppingCriteria):
    """Stops generation once the total sequence length reaches `max_length`."""

    def __init__(self, max_length: int, max_position_embeddings: Optional[int] = None):
        self.max_length = max_length
        self.max_position_embeddings = max_position_embeddings

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        cur_len = input_ids.shape[-1]
        is_done = cur_len >= self.max_length
        if self.max_position_embeddings is not None and not is_done and cur_len >= self.max_position_embeddings:
            logger.warning_once(
                'This is a friendly reminder - the current text generation call will exceed the model\'s predefined '
                f'maximum length ({self.max_position_embeddings}). Depending on the model, you may observe '
                'exceptions, performance degradation, or nothing at all.')
        return is_done


class MaxNewTokensCriteria(StoppingCriteria):
    """Deprecated: stops generation once `max_new_tokens` tokens were generated past `start_length`."""

    def __init__(self, start_length: int, max_new_tokens: int):
        warnings.warn(
            'The class `MaxNewTokensCriteria` is deprecated. '
            f'Please use `MaxLengthCriteria(max_length={start_length + max_new_tokens})` '
            'with `max_length = start_length + max_new_tokens` instead.',
            FutureWarning,
        )
        self.start_length = start_length
        self.max_new_tokens = max_new_tokens
        self.max_length = start_length + max_new_tokens

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        return input_ids.shape[-1] >= self.max_length


class MaxTimeCriteria(StoppingCriteria):
    """Stops generation once more than `max_time` seconds have elapsed since `initial_timestamp`."""

    def __init__(self, max_time: float, initial_timestamp: Optional[float] = None):
        self.max_time = max_time
        self.initial_timestamp = time.time() if initial_timestamp is None else initial_timestamp

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        return time.time() - self.initial_timestamp > self.max_time


class StoppingCriteriaList(list):
    """A list of criteria; generation stops as soon as any single criterion returns `True`."""

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        return any(criteria(input_ids, scores) for criteria in self)

    @property
    def max_length(self) -> Optional[int]:
        for stopping_criterium in self:
            if isinstance(stopping_criterium, MaxLengthCriteria):
                return stopping_criterium.max_length
            elif isinstance(stopping_criterium, MaxNewTokensCriteria):
                return stopping_criterium.max_length
        return None


def validate_stopping_criteria(stopping_criteria: StoppingCriteriaList, max_length: int) -> StoppingCriteriaList:
    stopping_max_length = stopping_criteria.max_length
    new_stopping_criteria = deepcopy(stopping_criteria)
    if stopping_max_length is not None and stopping_max_length != max_length:
        warnings.warn('You set different `max_length` for stopping criteria and `max_length` parameter', UserWarning)
    elif stopping_max_length is None:
        new_stopping_criteria.append(MaxLengthCriteria(max_length=max_length))
    return new_stopping_criteria
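if __name__ == "__main__":
    # Minimal usage sketch (added for illustration, not part of the original module; it
    # assumes the relative imports above resolve, i.e. the file is run inside its package).
    # Generation stops as soon as any criterion in the list returns True; the limits below
    # are arbitrary demonstration values.
    dummy_input_ids = torch.ones((1, 20), dtype=torch.long)
    dummy_scores = torch.zeros((1, 100))
    criteria = StoppingCriteriaList(
        [MaxLengthCriteria(max_length=50), MaxTimeCriteria(max_time=8.0)]
    )
    print(criteria(dummy_input_ids, dummy_scores))  # False: only 20 of the 50 allowed tokens exist
    print(criteria.max_length)  # 50, read off the MaxLengthCriteria entry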
| 406
| 0
|
from .imports import is_rich_available
if is_rich_available():
from rich.traceback import install
install(show_locals=False)
else:
raise ModuleNotFoundError('''To use the rich extension, install rich with `pip install rich`''')
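# Usage note (added): importing this module is enough to activate rich tracebacks for the
# whole process; afterwards any uncaught exception is rendered by `rich`, without local
# variables since `show_locals=False`. The import path depends on where this file lives.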
| 618
|
import argparse
import os
import torch
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
UNetaDModel,
)
TEST_UNET_CONFIG = {
'''sample_size''': 3_2,
'''in_channels''': 3,
'''out_channels''': 3,
'''layers_per_block''': 2,
'''num_class_embeds''': 1_0_0_0,
'''block_out_channels''': [3_2, 6_4],
'''attention_head_dim''': 8,
'''down_block_types''': [
'''ResnetDownsampleBlock2D''',
'''AttnDownBlock2D''',
],
'''up_block_types''': [
'''AttnUpBlock2D''',
'''ResnetUpsampleBlock2D''',
],
'''resnet_time_scale_shift''': '''scale_shift''',
'''upsample_type''': '''resnet''',
'''downsample_type''': '''resnet''',
}
IMAGENET_64_UNET_CONFIG = {
'''sample_size''': 6_4,
'''in_channels''': 3,
'''out_channels''': 3,
'''layers_per_block''': 3,
'''num_class_embeds''': 1_0_0_0,
'''block_out_channels''': [1_9_2, 1_9_2 * 2, 1_9_2 * 3, 1_9_2 * 4],
'''attention_head_dim''': 6_4,
'''down_block_types''': [
'''ResnetDownsampleBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
],
'''up_block_types''': [
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''ResnetUpsampleBlock2D''',
],
'''resnet_time_scale_shift''': '''scale_shift''',
'''upsample_type''': '''resnet''',
'''downsample_type''': '''resnet''',
}
LSUN_256_UNET_CONFIG = {
'''sample_size''': 2_5_6,
'''in_channels''': 3,
'''out_channels''': 3,
'''layers_per_block''': 2,
'''num_class_embeds''': None,
'''block_out_channels''': [2_5_6, 2_5_6, 2_5_6 * 2, 2_5_6 * 2, 2_5_6 * 4, 2_5_6 * 4],
'''attention_head_dim''': 6_4,
'''down_block_types''': [
'''ResnetDownsampleBlock2D''',
'''ResnetDownsampleBlock2D''',
'''ResnetDownsampleBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
],
'''up_block_types''': [
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''ResnetUpsampleBlock2D''',
'''ResnetUpsampleBlock2D''',
'''ResnetUpsampleBlock2D''',
],
'''resnet_time_scale_shift''': '''default''',
'''upsample_type''': '''resnet''',
'''downsample_type''': '''resnet''',
}
CD_SCHEDULER_CONFIG = {
'''num_train_timesteps''': 4_0,
'''sigma_min''': 0.002,
'''sigma_max''': 80.0,
}
CT_IMAGENET_64_SCHEDULER_CONFIG = {
'''num_train_timesteps''': 2_0_1,
'''sigma_min''': 0.002,
'''sigma_max''': 80.0,
}
CT_LSUN_256_SCHEDULER_CONFIG = {
'''num_train_timesteps''': 1_5_1,
'''sigma_min''': 0.002,
'''sigma_max''': 80.0,
}
def strabool(v):
    '''Parse a truthy/falsy CLI string into a bool (used for the --class_cond flag below).'''
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise argparse.ArgumentTypeError('boolean value expected')
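# Quick illustration (added, not in the original script): strabool normalizes common CLI
# spellings, e.g. strabool("True") -> True and strabool("n") -> False, and raises
# argparse.ArgumentTypeError for anything else, which argparse turns into a usage error.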
def convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=False):
    '''Copy one resnet block from the original checkpoint layout into the diffusers layout.'''
    new_checkpoint[f'{new_prefix}.norm1.weight'] = checkpoint[f'{old_prefix}.in_layers.0.weight']
    new_checkpoint[f'{new_prefix}.norm1.bias'] = checkpoint[f'{old_prefix}.in_layers.0.bias']
    new_checkpoint[f'{new_prefix}.conv1.weight'] = checkpoint[f'{old_prefix}.in_layers.2.weight']
    new_checkpoint[f'{new_prefix}.conv1.bias'] = checkpoint[f'{old_prefix}.in_layers.2.bias']
    new_checkpoint[f'{new_prefix}.time_emb_proj.weight'] = checkpoint[f'{old_prefix}.emb_layers.1.weight']
    new_checkpoint[f'{new_prefix}.time_emb_proj.bias'] = checkpoint[f'{old_prefix}.emb_layers.1.bias']
    new_checkpoint[f'{new_prefix}.norm2.weight'] = checkpoint[f'{old_prefix}.out_layers.0.weight']
    new_checkpoint[f'{new_prefix}.norm2.bias'] = checkpoint[f'{old_prefix}.out_layers.0.bias']
    new_checkpoint[f'{new_prefix}.conv2.weight'] = checkpoint[f'{old_prefix}.out_layers.3.weight']
    new_checkpoint[f'{new_prefix}.conv2.bias'] = checkpoint[f'{old_prefix}.out_layers.3.bias']
    if has_skip:
        new_checkpoint[f'{new_prefix}.conv_shortcut.weight'] = checkpoint[f'{old_prefix}.skip_connection.weight']
        new_checkpoint[f'{new_prefix}.conv_shortcut.bias'] = checkpoint[f'{old_prefix}.skip_connection.bias']
    return new_checkpoint
def convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_dim=None):
    '''Split a fused qkv 1x1 conv into the separate projections of the diffusers Attention layout.'''
    weight_q, weight_k, weight_v = checkpoint[f'{old_prefix}.qkv.weight'].chunk(3, dim=0)
    bias_q, bias_k, bias_v = checkpoint[f'{old_prefix}.qkv.bias'].chunk(3, dim=0)

    new_checkpoint[f'{new_prefix}.group_norm.weight'] = checkpoint[f'{old_prefix}.norm.weight']
    new_checkpoint[f'{new_prefix}.group_norm.bias'] = checkpoint[f'{old_prefix}.norm.bias']

    new_checkpoint[f'{new_prefix}.to_q.weight'] = weight_q.squeeze(-1).squeeze(-1)
    new_checkpoint[f'{new_prefix}.to_q.bias'] = bias_q.squeeze(-1).squeeze(-1)
    new_checkpoint[f'{new_prefix}.to_k.weight'] = weight_k.squeeze(-1).squeeze(-1)
    new_checkpoint[f'{new_prefix}.to_k.bias'] = bias_k.squeeze(-1).squeeze(-1)
    new_checkpoint[f'{new_prefix}.to_v.weight'] = weight_v.squeeze(-1).squeeze(-1)
    new_checkpoint[f'{new_prefix}.to_v.bias'] = bias_v.squeeze(-1).squeeze(-1)

    new_checkpoint[f'{new_prefix}.to_out.0.weight'] = (
        checkpoint[f'{old_prefix}.proj_out.weight'].squeeze(-1).squeeze(-1)
    )
    new_checkpoint[f'{new_prefix}.to_out.0.bias'] = checkpoint[f'{old_prefix}.proj_out.bias'].squeeze(-1).squeeze(-1)
    return new_checkpoint
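# Shape sketch (added for illustration; C = 64 is a hypothetical channel count): the
# original checkpoint stores attention QKV as one 1x1 conv weight of shape (3*C, C, 1, 1).
# chunk(3, dim=0) splits it into three (C, C, 1, 1) tensors and the two squeeze(-1) calls
# drop the conv dims, leaving the (C, C) linear weights that diffusers attention expects:
#
#   qkv = torch.randn(3 * 64, 64, 1, 1)
#   w_q, w_k, w_v = qkv.chunk(3, dim=0)   # each (64, 64, 1, 1)
#   w_q = w_q.squeeze(-1).squeeze(-1)     # (64, 64)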
def con_pt_to_diffuser(checkpoint_path: str, unet_config):
    '''Convert an original consistency-model UNet checkpoint into the diffusers state-dict layout.'''
    checkpoint = torch.load(checkpoint_path, map_location='cpu')
    new_checkpoint = {}

    new_checkpoint['time_embedding.linear_1.weight'] = checkpoint['time_embed.0.weight']
    new_checkpoint['time_embedding.linear_1.bias'] = checkpoint['time_embed.0.bias']
    new_checkpoint['time_embedding.linear_2.weight'] = checkpoint['time_embed.2.weight']
    new_checkpoint['time_embedding.linear_2.bias'] = checkpoint['time_embed.2.bias']

    if unet_config["num_class_embeds"] is not None:
        new_checkpoint['class_embedding.weight'] = checkpoint['label_emb.weight']

    new_checkpoint['conv_in.weight'] = checkpoint['input_blocks.0.0.weight']
    new_checkpoint['conv_in.bias'] = checkpoint['input_blocks.0.0.bias']

    down_block_types = unet_config['down_block_types']
    layers_per_block = unet_config['layers_per_block']
    attention_head_dim = unet_config['attention_head_dim']
    channels_list = unet_config['block_out_channels']
    current_layer = 1
    prev_channels = channels_list[0]

    for i, layer_type in enumerate(down_block_types):
        current_channels = channels_list[i]
        downsample_block_has_skip = current_channels != prev_channels
        if layer_type == "ResnetDownsampleBlock2D":
            for j in range(layers_per_block):
                new_prefix = f'down_blocks.{i}.resnets.{j}'
                old_prefix = f'input_blocks.{current_layer}.0'
                has_skip = True if j == 0 and downsample_block_has_skip else False
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=has_skip)
                current_layer += 1
        elif layer_type == "AttnDownBlock2D":
            for j in range(layers_per_block):
                new_prefix = f'down_blocks.{i}.resnets.{j}'
                old_prefix = f'input_blocks.{current_layer}.0'
                has_skip = True if j == 0 and downsample_block_has_skip else False
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=has_skip)
                new_prefix = f'down_blocks.{i}.attentions.{j}'
                old_prefix = f'input_blocks.{current_layer}.1'
                new_checkpoint = convert_attention(
                    checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim)
                current_layer += 1
        if i != len(down_block_types) - 1:
            new_prefix = f'down_blocks.{i}.downsamplers.0'
            old_prefix = f'input_blocks.{current_layer}.0'
            new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
            current_layer += 1
        prev_channels = current_channels

    # hardcoded the mid-block for now
    new_prefix = 'mid_block.resnets.0'
    old_prefix = 'middle_block.0'
    new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
    new_prefix = 'mid_block.attentions.0'
    old_prefix = 'middle_block.1'
    new_checkpoint = convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim)
    new_prefix = 'mid_block.resnets.1'
    old_prefix = 'middle_block.2'
    new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)

    current_layer = 0
    up_block_types = unet_config['up_block_types']

    for i, layer_type in enumerate(up_block_types):
        if layer_type == "ResnetUpsampleBlock2D":
            for j in range(layers_per_block + 1):
                new_prefix = f'up_blocks.{i}.resnets.{j}'
                old_prefix = f'output_blocks.{current_layer}.0'
                has_skip = True
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=has_skip)
                current_layer += 1
            if i != len(up_block_types) - 1:
                new_prefix = f'up_blocks.{i}.upsamplers.0'
                old_prefix = f'output_blocks.{current_layer-1}.1'
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
        elif layer_type == "AttnUpBlock2D":
            for j in range(layers_per_block + 1):
                new_prefix = f'up_blocks.{i}.resnets.{j}'
                old_prefix = f'output_blocks.{current_layer}.0'
                has_skip = True
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=has_skip)
                new_prefix = f'up_blocks.{i}.attentions.{j}'
                old_prefix = f'output_blocks.{current_layer}.1'
                new_checkpoint = convert_attention(
                    checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim)
                current_layer += 1
            if i != len(up_block_types) - 1:
                new_prefix = f'up_blocks.{i}.upsamplers.0'
                old_prefix = f'output_blocks.{current_layer-1}.2'
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)

    new_checkpoint['conv_norm_out.weight'] = checkpoint['out.0.weight']
    new_checkpoint['conv_norm_out.bias'] = checkpoint['out.0.bias']
    new_checkpoint['conv_out.weight'] = checkpoint['out.2.weight']
    new_checkpoint['conv_out.bias'] = checkpoint['out.2.bias']
    return new_checkpoint
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--unet_path', default=None, type=str, required=True, help='Path to the unet.pt to convert.')
    parser.add_argument(
        '--dump_path', default=None, type=str, required=True, help='Path to output the converted UNet model.'
    )
    parser.add_argument('--class_cond', default=True, type=str, help='Whether the model is class-conditional.')
    args = parser.parse_args()
    args.class_cond = strabool(args.class_cond)
    ckpt_name = os.path.basename(args.unet_path)
    print(f'Checkpoint: {ckpt_name}')
    # Get U-Net config
    if "imagenet64" in ckpt_name:
        unet_config = IMAGENET_64_UNET_CONFIG
    elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
        unet_config = LSUN_256_UNET_CONFIG
    elif "test" in ckpt_name:
        unet_config = TEST_UNET_CONFIG
    else:
        raise ValueError(f'Checkpoint type {ckpt_name} is not currently supported.')
    if not args.class_cond:
        unet_config["num_class_embeds"] = None
    converted_unet_ckpt = con_pt_to_diffuser(args.unet_path, unet_config)
    image_unet = UNetaDModel(**unet_config)
    image_unet.load_state_dict(converted_unet_ckpt)
    # Get scheduler config
    if "cd" in ckpt_name or "test" in ckpt_name:
        scheduler_config = CD_SCHEDULER_CONFIG
    elif "ct" in ckpt_name and "imagenet64" in ckpt_name:
        scheduler_config = CT_IMAGENET_64_SCHEDULER_CONFIG
    elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
        scheduler_config = CT_LSUN_256_SCHEDULER_CONFIG
    else:
        raise ValueError(f'Checkpoint type {ckpt_name} is not currently supported.')
    cm_scheduler = CMStochasticIterativeScheduler(**scheduler_config)
    consistency_model = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler)
    consistency_model.save_pretrained(args.dump_path)
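    # Example invocation (added; the checkpoint filename is hypothetical, since the script
    # keys the UNet and scheduler configs off substrings such as "imagenet64", "256",
    # "cd"/"ct", and "test"):
    #
    #   python convert_consistency_to_diffusers.py \
    #       --unet_path cd_imagenet64_l2.pt \
    #       --dump_path ./consistency-model-converted \
    #       --class_cond True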
| 618
| 1
|
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class SpeechaTextProcessor(ProcessorMixin):
    '''Wraps a Speech2Text feature extractor and a Speech2Text tokenizer into a single processor.'''

    feature_extractor_class = "Speech2TextFeatureExtractor"
    tokenizer_class = "Speech2TextTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def __call__(self, *args, **kwargs):
        '''Forward audio to the feature extractor and/or text to the tokenizer.'''
        # For backwards compatibility with `as_target_processor`.
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)
        if "raw_speech" in kwargs:
            warnings.warn('Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.')
            audio = kwargs.pop('raw_speech')
        else:
            audio = kwargs.pop('audio', None)
        sampling_rate = kwargs.pop('sampling_rate', None)
        text = kwargs.pop('text', None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]
        if audio is None and text is None:
            raise ValueError('You need to specify either an `audio` or `text` input to process.')
        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)
        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        '''Forward everything to the tokenizer's `batch_decode`.'''
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        '''Forward everything to the tokenizer's `decode`.'''
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        '''Temporarily route `__call__` to the tokenizer, for processing label text.'''
        warnings.warn(
            '`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '
            'labels by using the argument `text` of the regular `__call__` method (either in the same call as '
            'your audio inputs, or in a separate call.')
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
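# Usage sketch (added; the model id and the 16 kHz rate are illustrative assumptions, not
# taken from this file):
#
#   processor = SpeechaTextProcessor.from_pretrained("facebook/s2t-small-librispeech-asr")
#   inputs = processor(audio=waveform, sampling_rate=16_000, return_tensors="pt")
#   labels = processor(text="a transcription", return_tensors="pt")["input_ids"]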
| 705
|
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
logger = logging.get_logger(__name__)
DEBERTA_V2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''microsoft/deberta-v2-xlarge''': '''https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json''',
'''microsoft/deberta-v2-xxlarge''': '''https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json''',
'''microsoft/deberta-v2-xlarge-mnli''': (
'''https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json'''
),
'''microsoft/deberta-v2-xxlarge-mnli''': (
'''https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json'''
),
}
class DebertaVaConfig(PretrainedConfig):
    '''Configuration class storing the hyper-parameters of a DeBERTa-v2 model.'''

    model_type = "deberta-v2"

    def __init__(
        self,
        vocab_size=128100,
        hidden_size=1536,
        num_hidden_layers=24,
        num_attention_heads=24,
        intermediate_size=6144,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=0,
        initializer_range=0.02,
        layer_norm_eps=1e-7,
        relative_attention=False,
        max_relative_positions=-1,
        pad_token_id=0,
        position_biased_input=True,
        pos_att_type=None,
        pooler_dropout=0,
        pooler_hidden_act="gelu",
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.relative_attention = relative_attention
        self.max_relative_positions = max_relative_positions
        self.pad_token_id = pad_token_id
        self.position_biased_input = position_biased_input
        # Backwards compatibility
        if type(pos_att_type) == str:
            pos_att_type = [x.strip() for x in pos_att_type.lower().split('|')]
        self.pos_att_type = pos_att_type
        self.vocab_size = vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.pooler_hidden_size = kwargs.get('pooler_hidden_size', hidden_size)
        self.pooler_dropout = pooler_dropout
        self.pooler_hidden_act = pooler_hidden_act


class DebertaVaOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        if self._config.type_vocab_size > 0:
            return OrderedDict(
                [('input_ids', dynamic_axis), ('attention_mask', dynamic_axis), ('token_type_ids', dynamic_axis)])
        else:
            return OrderedDict([('input_ids', dynamic_axis), ('attention_mask', dynamic_axis)])

    @property
    def default_onnx_opset(self) -> int:
        return 12

    def generate_dummy_inputs(
        self,
        preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],
        batch_size: int = -1,
        seq_length: int = -1,
        num_choices: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
        tokenizer: "PreTrainedTokenizerBase" = None,
    ) -> Mapping[str, Any]:
        dummy_inputs = super().generate_dummy_inputs(preprocessor=preprocessor, framework=framework)
        if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
            del dummy_inputs["token_type_ids"]
        return dummy_inputs
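# Usage sketch (added for illustration): every keyword falls back to the defaults above,
# so a config can be built with only the overrides that matter:
#
#   config = DebertaVaConfig(hidden_dropout_prob=0.2)
#   assert config.model_type == "deberta-v2" and config.hidden_size == 1536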
| 661
| 0
|
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFInpaintingSuperResolutionPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFInpaintingSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"original_image"})
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_superresolution_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 16, 16), rng=random.Random(seed)).to(device)
        original_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "original_image": original_image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_floataa(self):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_floataa(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)
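    # How the mixin consumes these hooks, roughly (added sketch; the mixin-side flow is an
    # assumption, not taken from this file): build the pipeline from get_dummy_components()
    # and call it with get_dummy_inputs():
    #
    #   components = self.get_dummy_components()
    #   pipe = self.pipeline_class(**components)
    #   inputs = self.get_dummy_inputs(torch_device)
    #   image = pipe(**inputs).images[0]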
| 311
|
import math
import time
from typing import Dict, List, Optional
from torch.utils.data import Dataset
from transformers import SeqaSeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class QuestionAnsweringSeqaSeqTrainer(SeqaSeqTrainer):
    def __init__(self, *args, eval_examples=None, post_process_function=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function

    def evaluate(
        self,
        eval_dataset=None,
        eval_examples=None,
        ignore_keys=None,
        metric_key_prefix="eval",
        **gen_kwargs,
    ) -> Dict[str, float]:
        gen_kwargs = gen_kwargs.copy()
        gen_kwargs["max_length"] = (
            gen_kwargs["max_length"] if gen_kwargs.get("max_length") is not None else self.args.generation_max_length
        )
        gen_kwargs["num_beams"] = (
            gen_kwargs["num_beams"] if gen_kwargs.get("num_beams") is not None else self.args.generation_num_beams
        )
        self._gen_kwargs = gen_kwargs

        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        start_time = time.time()
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                eval_dataloader,
                description="Evaluation",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )

        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node write the results by default
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output)
            metrics = self.compute_metrics(eval_preds)
            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f"{metric_key_prefix}_"):
                    metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
            metrics.update(output.metrics)
        else:
            metrics = output.metrics

        if self.args.should_log:
            # Only the main node log the results by default
            self.log(metrics)

        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())

        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics

    def predict(self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix="test", **gen_kwargs):
        self._gen_kwargs = gen_kwargs.copy()

        predict_dataloader = self.get_test_dataloader(predict_dataset)

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        start_time = time.time()
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                predict_dataloader,
                description="Prediction",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )

        if self.post_process_function is None or self.compute_metrics is None:
            return output

        predictions = self.post_process_function(predict_examples, predict_dataset, output, "predict")
        metrics = self.compute_metrics(predictions)
        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"{metric_key_prefix}_"):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
        metrics.update(output.metrics)
        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)
| 1
|
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""microsoft/unispeech-large-1500h-cv""": (
"""https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json"""
),
# See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}
class UniSpeechConfig(PretrainedConfig):
    '''Configuration class storing the hyper-parameters of a UniSpeech model.'''

    model_type = 'unispeech'

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        feat_quantizer_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        num_ctc_classes=80,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        replace_prob=0.5,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.num_ctc_classes = num_ctc_classes
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`.")

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # pretraining loss
        self.replace_prob = replace_prob

    @property
    def inputs_to_logits_ratio(self):
        # Overall downsampling factor of the convolutional feature encoder.
        return functools.reduce(operator.mul, self.conv_stride, 1)
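# Worked example (added): with the default conv_stride = (5, 2, 2, 2, 2, 2, 2) the property
# evaluates to 5 * 2**6 = 320, i.e. one encoder frame per 320 input samples (20 ms of audio
# at a 16 kHz sampling rate):
#
#   assert functools.reduce(operator.mul, (5, 2, 2, 2, 2, 2, 2), 1) == 320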
| 611
|
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
else:
    class Image:
        '''Stand-in so type references below do not fail when vision support is unavailable.'''

        @staticmethod
        def open(*args, **kwargs):
            pass
def hashimage(image):
    '''Return the MD5 hex digest of an image's raw bytes (used to fingerprint pipeline outputs).'''
    m = hashlib.md5(image.tobytes())
    return m.hexdigest()
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class DepthEstimationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_DEPTH_ESTIMATION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        depth_estimator = DepthEstimationPipeline(model=model, image_processor=processor)
        return depth_estimator, [
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
        ]

    def run_pipeline_test(self, depth_estimator, examples):
        outputs = depth_estimator("./tests/fixtures/tests_samples/COCO/000000039769.png")
        self.assertEqual({"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)}, outputs)
        import datasets

        dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils", "image", split="test")
        outputs = depth_estimator(
            [
                Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
                "http://images.cocodataset.org/val2017/000000039769.jpg",
                # RGBA
                dataset[0]["file"],
                # LA
                dataset[1]["file"],
                # L
                dataset[2]["file"],
            ]
        )
        self.assertEqual(
            [
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
            ],
            outputs,
        )

    @require_tf
    @unittest.skip("Depth estimation is not implemented in TF")
    def test_small_model_tf(self):
        pass

    @slow
    @require_torch
    def test_large_model_pt(self):
        model_id = "Intel/dpt-large"
        depth_estimator = pipeline("depth-estimation", model=model_id)
        outputs = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
        outputs["depth"] = hashimage(outputs["depth"])
        # This seems flaky.
        # self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
        self.assertEqual(nested_simplify(outputs["predicted_depth"].max().item()), 29.304)
        self.assertEqual(nested_simplify(outputs["predicted_depth"].min().item()), 2.662)

    @require_torch
    def test_small_model_pt(self):
        self.skipTest("There is no hf-internal-testing tiny model for either GLPN or DPT")
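    # High-level API exercised by the slow test above (added sketch):
    #
    #   depth_estimator = pipeline("depth-estimation", model="Intel/dpt-large")
    #   out = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
    #   out["depth"]            # PIL.Image.Image depth map
    #   out["predicted_depth"]  # raw torch.Tensor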
| 611
| 1
|
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from huggingface_hub import HfFolder, Repository, create_repo, delete_repo
from requests.exceptions import HTTPError
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
PROCESSOR_MAPPING,
TOKENIZER_MAPPING,
AutoConfig,
AutoFeatureExtractor,
AutoProcessor,
AutoTokenizer,
BertTokenizer,
ProcessorMixin,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
)
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
from transformers.tokenization_utils import TOKENIZER_CONFIG_FILE
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_tokenizers_available
sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
from test_module.custom_processing import CustomProcessor # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
_lowercase = get_tests_dir("""fixtures/dummy_feature_extractor_config.json""")
_lowercase = get_tests_dir("""fixtures/vocab.json""")
_lowercase = get_tests_dir("""fixtures""")
class lowercase_ ( unittest.TestCase ):
__lowerCamelCase = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
def _snake_case ( self ) -> List[Any]:
SCREAMING_SNAKE_CASE_ : Any =0
def _snake_case ( self ) -> Optional[Any]:
SCREAMING_SNAKE_CASE_ : Optional[Any] =AutoProcessor.from_pretrained('''facebook/wav2vec2-base-960h''' )
self.assertIsInstance(_a , _a )
def _snake_case ( self ) -> Tuple:
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE_ : Dict =WavaVecaConfig()
SCREAMING_SNAKE_CASE_ : Optional[Any] =AutoProcessor.from_pretrained('''facebook/wav2vec2-base-960h''' )
# save in new folder
model_config.save_pretrained(_a )
processor.save_pretrained(_a )
SCREAMING_SNAKE_CASE_ : str =AutoProcessor.from_pretrained(_a )
self.assertIsInstance(_a , _a )
def _snake_case ( self ) -> List[Any]:
with tempfile.TemporaryDirectory() as tmpdirname:
# copy relevant files
copyfile(_a , os.path.join(_a , _a ) )
copyfile(_a , os.path.join(_a , '''vocab.json''' ) )
SCREAMING_SNAKE_CASE_ : List[Any] =AutoProcessor.from_pretrained(_a )
self.assertIsInstance(_a , _a )
def _snake_case ( self ) -> Any:
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE_ : int =WavaVecaFeatureExtractor()
SCREAMING_SNAKE_CASE_ : List[str] =AutoTokenizer.from_pretrained('''facebook/wav2vec2-base-960h''' )
SCREAMING_SNAKE_CASE_ : List[str] =WavaVecaProcessor(_a , _a )
# save in new folder
processor.save_pretrained(_a )
# drop `processor_class` in tokenizer
with open(os.path.join(_a , _a ) , '''r''' ) as f:
SCREAMING_SNAKE_CASE_ : Dict =json.load(_a )
config_dict.pop('''processor_class''' )
with open(os.path.join(_a , _a ) , '''w''' ) as f:
f.write(json.dumps(_a ) )
SCREAMING_SNAKE_CASE_ : Union[str, Any] =AutoProcessor.from_pretrained(_a )
self.assertIsInstance(_a , _a )
def _snake_case ( self ) -> List[Any]:
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE_ : str =WavaVecaFeatureExtractor()
SCREAMING_SNAKE_CASE_ : List[Any] =AutoTokenizer.from_pretrained('''facebook/wav2vec2-base-960h''' )
SCREAMING_SNAKE_CASE_ : int =WavaVecaProcessor(_a , _a )
# save in new folder
processor.save_pretrained(_a )
# drop `processor_class` in feature extractor
with open(os.path.join(_a , _a ) , '''r''' ) as f:
SCREAMING_SNAKE_CASE_ : Optional[int] =json.load(_a )
config_dict.pop('''processor_class''' )
with open(os.path.join(_a , _a ) , '''w''' ) as f:
f.write(json.dumps(_a ) )
SCREAMING_SNAKE_CASE_ : Any =AutoProcessor.from_pretrained(_a )
self.assertIsInstance(_a , _a )
def _snake_case ( self ) -> List[Any]:
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE_ : Optional[Any] =WavaVecaConfig(processor_class='''Wav2Vec2Processor''' )
model_config.save_pretrained(_a )
# copy relevant files
copyfile(_a , os.path.join(_a , '''vocab.json''' ) )
# create emtpy sample processor
with open(os.path.join(_a , _a ) , '''w''' ) as f:
f.write('''{}''' )
SCREAMING_SNAKE_CASE_ : Any =AutoProcessor.from_pretrained(_a )
self.assertIsInstance(_a , _a )
def _snake_case ( self ) -> Any:
with self.assertRaises(_a ):
SCREAMING_SNAKE_CASE_ : List[Any] =AutoProcessor.from_pretrained('''hf-internal-testing/test_dynamic_processor''' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(_a ):
SCREAMING_SNAKE_CASE_ : Any =AutoProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=_a )
SCREAMING_SNAKE_CASE_ : List[str] =AutoProcessor.from_pretrained('''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=_a )
self.assertTrue(processor.special_attribute_present )
self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' )
SCREAMING_SNAKE_CASE_ : Optional[Any] =processor.feature_extractor
self.assertTrue(feature_extractor.special_attribute_present )
self.assertEqual(feature_extractor.__class__.__name__ , '''NewFeatureExtractor''' )
SCREAMING_SNAKE_CASE_ : int =processor.tokenizer
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
# Test we can also load the slow version
SCREAMING_SNAKE_CASE_ : Any =AutoProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=_a , use_fast=_a )
SCREAMING_SNAKE_CASE_ : Tuple =new_processor.tokenizer
self.assertTrue(new_tokenizer.special_attribute_present )
self.assertEqual(new_tokenizer.__class__.__name__ , '''NewTokenizer''' )
else:
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
def _snake_case ( self ) -> List[str]:
try:
AutoConfig.register('''custom''' , _a )
AutoFeatureExtractor.register(_a , _a )
AutoTokenizer.register(_a , slow_tokenizer_class=_a )
AutoProcessor.register(_a , _a )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(_a ):
AutoProcessor.register(_a , _a )
# Now that the config is registered, it can be used as any other config with the auto-API
SCREAMING_SNAKE_CASE_ : Any =CustomFeatureExtractor.from_pretrained(_a )
with tempfile.TemporaryDirectory() as tmp_dir:
SCREAMING_SNAKE_CASE_ : str =os.path.join(_a , '''vocab.txt''' )
with open(_a , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens] ) )
SCREAMING_SNAKE_CASE_ : str =CustomTokenizer(_a )
SCREAMING_SNAKE_CASE_ : List[Any] =CustomProcessor(_a , _a )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(_a )
SCREAMING_SNAKE_CASE_ : Any =AutoProcessor.from_pretrained(_a )
self.assertIsInstance(_a , _a )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def _snake_case ( self ) -> Optional[Any]:
class lowercase_ ( __SCREAMING_SNAKE_CASE ):
__lowerCamelCase = False
class lowercase_ ( __SCREAMING_SNAKE_CASE ):
__lowerCamelCase = False
class lowercase_ ( __SCREAMING_SNAKE_CASE ):
__lowerCamelCase = "AutoFeatureExtractor"
__lowerCamelCase = "AutoTokenizer"
__lowerCamelCase = False
try:
AutoConfig.register('''custom''' , _a )
AutoFeatureExtractor.register(_a , _a )
AutoTokenizer.register(_a , slow_tokenizer_class=_a )
AutoProcessor.register(_a , _a )
# If remote code is not set, the default is to use local classes.
SCREAMING_SNAKE_CASE_ : List[Any] =AutoProcessor.from_pretrained('''hf-internal-testing/test_dynamic_processor''' )
self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote code is disabled, we load the local ones.
SCREAMING_SNAKE_CASE_ : int =AutoProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=_a )
self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub.
SCREAMING_SNAKE_CASE_ : List[str] =AutoProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=_a )
self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' )
self.assertTrue(processor.special_attribute_present )
self.assertTrue(processor.feature_extractor.special_attribute_present )
self.assertTrue(processor.tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def _snake_case ( self ) -> Dict:
SCREAMING_SNAKE_CASE_ : List[Any] =AutoProcessor.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
self.assertEqual(processor.__class__.__name__ , '''BertTokenizerFast''' )
def _snake_case ( self ) -> Optional[int]:
SCREAMING_SNAKE_CASE_ : List[Any] =AutoProcessor.from_pretrained('''hf-internal-testing/tiny-random-convnext''' )
self.assertEqual(processor.__class__.__name__ , '''ConvNextImageProcessor''' )
@is_staging_test
class lowercase_ ( unittest.TestCase ):
__lowerCamelCase = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
@classmethod
def _snake_case ( cls ) -> Optional[Any]:
SCREAMING_SNAKE_CASE_ : Tuple =TOKEN
HfFolder.save_token(_a )
@classmethod
def _snake_case ( cls ) -> Optional[int]:
try:
delete_repo(token=cls._token , repo_id='''test-processor''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-processor-org''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''test-dynamic-processor''' )
except HTTPError:
pass
def _snake_case ( self ) -> str:
SCREAMING_SNAKE_CASE_ : Union[str, Any] =WavaVecaProcessor.from_pretrained(_a )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(_a , '''test-processor''' ) , push_to_hub=_a , use_auth_token=self._token )
SCREAMING_SNAKE_CASE_ : Union[str, Any] =WavaVecaProcessor.from_pretrained(F'{USER}/test-processor' )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(_a , getattr(new_processor.feature_extractor , _a ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
def _snake_case ( self ) -> List[Any]:
SCREAMING_SNAKE_CASE_ : Dict =WavaVecaProcessor.from_pretrained(_a )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(_a , '''test-processor-org''' ) , push_to_hub=_a , use_auth_token=self._token , organization='''valid_org''' , )
SCREAMING_SNAKE_CASE_ : Any =WavaVecaProcessor.from_pretrained('''valid_org/test-processor-org''' )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(_a , getattr(new_processor.feature_extractor , _a ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
def _snake_case ( self ) -> str:
CustomFeatureExtractor.register_for_auto_class()
CustomTokenizer.register_for_auto_class()
CustomProcessor.register_for_auto_class()
SCREAMING_SNAKE_CASE_ : Tuple =CustomFeatureExtractor.from_pretrained(_a )
with tempfile.TemporaryDirectory() as tmp_dir:
SCREAMING_SNAKE_CASE_ : Tuple =os.path.join(_a , '''vocab.txt''' )
with open(_a , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens] ) )
SCREAMING_SNAKE_CASE_ : int =CustomTokenizer(_a )
SCREAMING_SNAKE_CASE_ : str =CustomProcessor(_a , _a )
with tempfile.TemporaryDirectory() as tmp_dir:
create_repo(F'{USER}/test-dynamic-processor' , token=self._token )
SCREAMING_SNAKE_CASE_ : List[Any] =Repository(_a , clone_from=F'{USER}/test-dynamic-processor' , token=self._token )
processor.save_pretrained(_a )
# This has added the proper auto_map field to the feature extractor config
self.assertDictEqual(
processor.feature_extractor.auto_map , {
'''AutoFeatureExtractor''': '''custom_feature_extraction.CustomFeatureExtractor''',
'''AutoProcessor''': '''custom_processing.CustomProcessor''',
} , )
# This has added the proper auto_map field to the tokenizer config
with open(os.path.join(_a , '''tokenizer_config.json''' ) ) as f:
SCREAMING_SNAKE_CASE_ : List[str] =json.load(_a )
self.assertDictEqual(
tokenizer_config['''auto_map'''] , {
'''AutoTokenizer''': ['''custom_tokenization.CustomTokenizer''', None],
'''AutoProcessor''': '''custom_processing.CustomProcessor''',
} , )
# The code has been copied from fixtures
self.assertTrue(os.path.isfile(os.path.join(_a , '''custom_feature_extraction.py''' ) ) )
self.assertTrue(os.path.isfile(os.path.join(_a , '''custom_tokenization.py''' ) ) )
self.assertTrue(os.path.isfile(os.path.join(_a , '''custom_processing.py''' ) ) )
repo.push_to_hub()
SCREAMING_SNAKE_CASE_ : Any =AutoProcessor.from_pretrained(F'{USER}/test-dynamic-processor' , trust_remote_code=_a )
# Can't make an isinstance check because the new_processor is from the CustomProcessor class of a dynamic module
self.assertEqual(new_processor.__class__.__name__ , '''CustomProcessor''' )
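# Registration pattern in brief (added; it mirrors the try/finally blocks above): a custom
# config/processor pair is wired into the auto classes and then resolved by name:
#
#   AutoConfig.register("custom", CustomConfig)
#   AutoFeatureExtractor.register(CustomConfig, CustomFeatureExtractor)
#   AutoTokenizer.register(CustomConfig, slow_tokenizer_class=CustomTokenizer)
#   AutoProcessor.register(CustomConfig, CustomProcessor)
#   processor = AutoProcessor.from_pretrained(saved_dir)  # -> CustomProcessor instance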
| 443
|
'''simple docstring'''
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class __lowerCAmelCase :
'''simple docstring'''
def __init__( self : Optional[int] ,_a : List[Any] ,_a : Dict=13 ,_a : List[str]=7 ,_a : Dict=True ,_a : List[Any]=True ,_a : Dict=False ,_a : Optional[int]=True ,_a : List[Any]=99 ,_a : Any=32 ,_a : Optional[int]=5 ,_a : List[Any]=4 ,_a : int=37 ,_a : List[Any]="gelu" ,_a : List[str]=0.1 ,_a : Union[str, Any]=0.1 ,_a : Any=512 ,_a : int=16 ,_a : Optional[int]=2 ,_a : Any=0.02 ,_a : Any=3 ,_a : Any=4 ,_a : List[str]=None ,):
'''simple docstring'''
A_ : List[str] = parent
A_ : Any = batch_size
A_ : Tuple = seq_length
A_ : List[str] = is_training
A_ : Tuple = use_input_mask
A_ : Dict = use_token_type_ids
A_ : List[Any] = use_labels
A_ : Union[str, Any] = vocab_size
A_ : Any = hidden_size
A_ : str = num_hidden_layers
A_ : Optional[Any] = num_attention_heads
A_ : str = intermediate_size
A_ : Tuple = hidden_act
A_ : Any = hidden_dropout_prob
A_ : Any = attention_probs_dropout_prob
A_ : List[str] = max_position_embeddings
A_ : int = type_vocab_size
A_ : Union[str, Any] = type_sequence_label_size
A_ : Any = initializer_range
A_ : List[Any] = num_labels
A_ : Optional[Any] = num_choices
A_ : List[Any] = scope
def _a ( self : Optional[int] ):
'''simple docstring'''
A_ : str = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
A_ : int = None
if self.use_input_mask:
A_ : List[str] = random_attention_mask([self.batch_size, self.seq_length] )
A_ : Dict = None
if self.use_token_type_ids:
A_ : Tuple = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
A_ : str = None
A_ : Any = None
A_ : str = None
if self.use_labels:
A_ : Dict = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
A_ : Any = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
A_ : Optional[int] = ids_tensor([self.batch_size] ,self.num_choices )
A_ : str = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _a ( self : Optional[Any] ):
'''simple docstring'''
return LlamaConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,is_decoder=_a ,initializer_range=self.initializer_range ,)
def _a ( self : Union[str, Any] ,_a : Optional[Any] ,_a : Optional[Any] ,_a : Any ,_a : Any ,_a : Optional[Any] ,_a : Optional[Any] ,_a : Tuple ):
'''simple docstring'''
A_ : Any = LlamaModel(config=_a )
model.to(_a )
model.eval()
A_ : Optional[Any] = model(_a ,attention_mask=_a )
A_ : Optional[int] = model(_a )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def _a ( self : Optional[int] ,_a : int ,_a : List[str] ,_a : Any ,_a : Any ,_a : Dict ,_a : List[str] ,_a : Optional[int] ,_a : Any ,_a : List[str] ,):
'''simple docstring'''
A_ : List[str] = True
A_ : Union[str, Any] = LlamaModel(_a )
model.to(_a )
model.eval()
A_ : Tuple = model(
_a ,attention_mask=_a ,encoder_hidden_states=_a ,encoder_attention_mask=_a ,)
A_ : List[Any] = model(
_a ,attention_mask=_a ,encoder_hidden_states=_a ,)
A_ : int = model(_a ,attention_mask=_a )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def _a ( self : Any ,_a : Any ,_a : Optional[int] ,_a : List[Any] ,_a : List[Any] ,_a : Dict ,_a : Tuple ,_a : Optional[int] ,_a : List[Any] ,_a : Union[str, Any] ,):
'''simple docstring'''
A_ : List[Any] = LlamaForCausalLM(config=_a )
model.to(_a )
model.eval()
A_ : Dict = model(_a ,attention_mask=_a ,labels=_a )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def _a ( self : str ,_a : List[Any] ,_a : Dict ,_a : str ,_a : Tuple ,_a : Tuple ,_a : Tuple ,_a : Optional[Any] ,_a : Dict ,_a : Union[str, Any] ,):
'''simple docstring'''
A_ : Optional[Any] = True
A_ : Any = True
A_ : Tuple = LlamaForCausalLM(config=_a )
model.to(_a )
model.eval()
# first forward pass
A_ : Optional[int] = model(
_a ,attention_mask=_a ,encoder_hidden_states=_a ,encoder_attention_mask=_a ,use_cache=_a ,)
A_ : Tuple = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
A_ : int = ids_tensor((self.batch_size, 3) ,config.vocab_size )
A_ : List[Any] = ids_tensor((self.batch_size, 3) ,vocab_size=2 )
# append to next input_ids and
A_ : Tuple = torch.cat([input_ids, next_tokens] ,dim=-1 )
A_ : int = torch.cat([input_mask, next_mask] ,dim=-1 )
A_ : List[str] = model(
_a ,attention_mask=_a ,encoder_hidden_states=_a ,encoder_attention_mask=_a ,output_hidden_states=_a ,)["""hidden_states"""][0]
A_ : Any = model(
_a ,attention_mask=_a ,encoder_hidden_states=_a ,encoder_attention_mask=_a ,past_key_values=_a ,output_hidden_states=_a ,)["""hidden_states"""][0]
# select random slice
A_ : List[str] = ids_tensor((1,) ,output_from_past.shape[-1] ).item()
A_ : str = output_from_no_past[:, -3:, random_slice_idx].detach()
A_ : int = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(_a ,_a ,atol=1e-3 ) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
a_ = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
a_ = (LlamaForCausalLM,) if is_torch_available() else ()
a_ = (
{
"""feature-extraction""": LlamaModel,
"""text-classification""": LlamaForSequenceClassification,
"""text-generation""": LlamaForCausalLM,
"""zero-shot""": LlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
a_ = False
a_ = False
def _a ( self : List[Any] ):
'''simple docstring'''
A_ : Union[str, Any] = LlamaModelTester(self )
A_ : List[str] = ConfigTester(self ,config_class=_a ,hidden_size=37 )
def _a ( self : Dict ):
'''simple docstring'''
self.config_tester.run_common_tests()
def _a ( self : Optional[Any] ):
'''simple docstring'''
A_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
def _a ( self : Optional[Any] ):
'''simple docstring'''
A_ : int = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
A_ : Dict = type
self.model_tester.create_and_check_model(*_a )
def _a ( self : List[Any] ):
'''simple docstring'''
A_ , A_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
A_ : List[str] = 3
A_ : Any = input_dict["""input_ids"""]
A_ : Union[str, Any] = input_ids.ne(1 ).to(_a )
A_ : Union[str, Any] = ids_tensor([self.model_tester.batch_size] ,self.model_tester.type_sequence_label_size )
A_ : List[Any] = LlamaForSequenceClassification(_a )
model.to(_a )
model.eval()
A_ : int = model(_a ,attention_mask=_a ,labels=_a )
self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) )
def _a ( self : Dict ):
'''simple docstring'''
A_ , A_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
A_ : str = 3
A_ : Union[str, Any] = """single_label_classification"""
A_ : Union[str, Any] = input_dict["""input_ids"""]
A_ : List[Any] = input_ids.ne(1 ).to(_a )
A_ : Dict = ids_tensor([self.model_tester.batch_size] ,self.model_tester.type_sequence_label_size )
A_ : List[Any] = LlamaForSequenceClassification(_a )
model.to(_a )
model.eval()
A_ : List[str] = model(_a ,attention_mask=_a ,labels=_a )
self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) )
    def test_llama_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size
        ).to(torch.float)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
@unittest.skip("""LLaMA buffers include complex numbers, which breaks this test""" )
    def test_save_load_fast_init_from_base(self):
        pass
@parameterized.expand([("""linear""",), ("""dynamic""",)] )
    def test_model_rope_scaling(self, scaling_type):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10], config.vocab_size)
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size)

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        original_model = LlamaModel(config)
        original_model.to(torch_device)
        original_model.eval()
        original_short_output = original_model(short_input).last_hidden_state
        original_long_output = original_model(long_input).last_hidden_state

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {"type": scaling_type, "factor": 10.0}
        scaled_model = LlamaModel(config)
        scaled_model.to(torch_device)
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input).last_hidden_state
        scaled_long_output = scaled_model(long_input).last_hidden_state

        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
        else:
            self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))

        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5))
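    # Note on the assertions above: "linear" RoPE scaling rescales positions at
    # every sequence length, so even short inputs diverge from the unscaled
    # model, while "dynamic" (NTK-aware) scaling leaves the embeddings untouched
    # until the input grows past the original max_position_embeddings.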
@require_torch
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@unittest.skip("""Logits are not exactly the same, once we fix the instabalities somehow, will update!""" )
@slow
def _a ( self : Tuple ):
'''simple docstring'''
A_ : Any = [1, 306, 4658, 278, 6593, 310, 2834, 338]
A_ : List[str] = LlamaForCausalLM.from_pretrained("""meta-llama/Llama-2-7b-hf""" ,device_map="""auto""" )
A_ : str = model(torch.tensor([input_ids] ) )
# Expected mean on dim = -1
A_ : Union[str, Any] = torch.tensor([[-6.6550, -4.1227, -4.9859, -3.2406, 0.8262, -3.0033, 1.2964, -3.3699]] )
torch.testing.assert_close(out.mean(-1 ) ,_a ,atol=1e-2 ,rtol=1e-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
A_ : str = torch.tensor([-12.8281, -7.4453, -0.4639, -8.0625, -7.2500, -8.0000, -6.4883, -7.7695, -7.8438, -7.0312, -6.2188, -7.1328, -1.8496, 1.9961, -8.6250, -6.7227, -12.8281, -6.9492, -7.0742, -7.7852, -7.5820, -7.9062, -6.9375, -7.9805, -8.3438, -8.1562, -8.0469, -7.6250, -7.7422, -7.3398,] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] ,_a ,atol=1e-5 ,rtol=1e-5 )
@unittest.skip("""Logits are not exactly the same, once we fix the instabalities somehow, will update!""" )
@slow
def _a ( self : str ):
'''simple docstring'''
A_ : Dict = [1, 306, 4658, 278, 6593, 310, 2834, 338]
A_ : Optional[int] = LlamaForCausalLM.from_pretrained("""meta-llama/Llama-2-13b-hf""" ,device_map="""auto""" )
A_ : Tuple = model(torch.tensor(_a ) )
# Expected mean on dim = -1
A_ : str = torch.tensor([[-2.0622, -1.2794, -1.1638, -0.9788, -1.4603, -1.0238, -1.7893, -1.4411]] )
torch.testing.assert_close(out.mean(-1 ) ,_a ,atol=1e-2 ,rtol=1e-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
A_ : str = torch.tensor([-8.1406, -8.0547, 2.7461, -1.2344, -0.1448, -1.8262, -1.0020, -1.8154, -1.6895, -1.8516, -2.3574, -0.9277, 3.7598, 6.5742, -1.2998, -0.1177, -8.1406, -2.9688, -2.9199, -3.1699, -3.5254, -2.3555, -2.7988, -3.4141, -2.8262, -4.5195, -3.3379, -3.3164, -2.7832, -3.0273] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] ,_a ,atol=1e-5 ,rtol=1e-5 )
@unittest.skip("""Logits are not exactly the same, once we fix the instabalities somehow, will update!""" )
@slow
def _a ( self : Union[str, Any] ):
'''simple docstring'''
A_ : Union[str, Any] = [1, 306, 4658, 278, 6593, 310, 2834, 338]
A_ : Optional[int] = LlamaForCausalLM.from_pretrained("""meta-llama/Llama-2-13b-chat-hf""" ,device_map="""auto""" )
A_ : int = model(torch.tensor(_a ) )
# Expected mean on dim = -1
A_ : Union[str, Any] = torch.tensor([[-0.8562, -1.8520, -0.7551, -0.4162, -1.5161, -1.2038, -2.4823, -2.3254]] )
torch.testing.assert_close(out.mean(-1 ) ,_a ,atol=1e-2 ,rtol=1e-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
A_ : Optional[int] = torch.tensor([-2.2227, 4.8828, 0.9023, -0.4578, -0.7871, -0.1033, -0.6221, -0.5786, -0.7803, -1.0674, -1.2920, -0.1570, 0.8008, 2.0723, -0.9497, 0.2771, -2.2227, -0.7612, -1.4346, -1.2061, -1.6426, -0.3000, -0.7139, -1.1934, -1.8691, -1.6973, -1.5947, -1.2705, -0.3523, -0.5513] )
# fmt: on
torch.testing.assert_close(out.mean(-1 ) ,_a ,atol=1e-2 ,rtol=1e-2 )
    @unittest.skip(
        """Logits are not exactly the same; once we fix the instabilities, we will update! Also this is going to be a `too_slow` test""")
@slow
def _a ( self : Optional[Any] ):
'''simple docstring'''
A_ : Optional[int] = [1, 306, 4658, 278, 6593, 310, 2834, 338]
A_ : str = LlamaForCausalLM.from_pretrained("""meta-llama/Llama-2-70b-hf""" ,device_map="""auto""" )
A_ : Tuple = model(torch.tensor(_a ) )
A_ : Dict = torch.tensor(
[[-4.2327, -3.3360, -4.6665, -4.7631, -1.8180, -3.4170, -1.4211, -3.1810]] ,dtype=torch.floataa )
torch.testing.assert_close(out.mean(-1 ) ,_a ,atol=1e-2 ,rtol=1e-2 )
# fmt: off
A_ : List[str] = torch.tensor([-9.4922, -3.9551, 1.7998, -5.6758, -5.1055, -5.8984, -4.8320, -6.8086, -6.5391, -5.6172, -5.5820, -5.5352, 1.7881, 3.6289, -6.5117, -3.4785, -9.5000, -6.0352, -6.8125, -6.0195, -6.6836, -5.4727, -6.2812, -6.0391, -7.3398, -7.4297, -7.4844, -6.5820, -5.8789, -5.5312] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] ,_a ,atol=1e-5 ,rtol=1e-5 )
@unittest.skip("""Model is curently gated""" )
@slow
def _a ( self : Tuple ):
'''simple docstring'''
A_ : Union[str, Any] = """Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the \"princi"""
A_ : List[str] = """Simply put, the theory of relativity states that """
A_ : Any = LlamaTokenizer.from_pretrained("""meta-llama/Llama-2-13b-chat-hf""" )
A_ : Union[str, Any] = tokenizer.encode(_a ,return_tensors="""pt""" )
A_ : List[str] = LlamaForCausalLM.from_pretrained(
"""meta-llama/Llama-2-13b-chat-hf""" ,device_map="""sequential""" ,use_safetensors=_a )
# greedy generation outputs
A_ : str = model.generate(_a ,max_new_tokens=64 ,top_p=_a ,temperature=1 ,do_sample=_a )
A_ : Optional[Any] = tokenizer.decode(generated_ids[0] ,skip_special_tokens=_a )
self.assertEqual(_a ,_a )
| 665
| 0
|
import colorsys
from PIL import Image # type: ignore
def get_distance(x: float, y: float, max_step: int) -> float:
    a = x
    b = y
    for step in range(max_step):  # noqa: B007
        a_new = a * a - b * b + x
        b = 2 * a * b + y
        a = a_new
        # divergence happens for all complex numbers with an absolute value
        # greater than 4
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)
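# Quick sanity check (illustrative values): the origin never diverges, so
# get_distance(0, 0, 50) == 1.0, while a clearly external point such as
# get_distance(2, 1, 50) breaks out on the first step and returns 0.0.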
def get_black_and_white_rgb(distance: float) -> tuple:
    if distance == 1:
        return (0, 0, 0)
    else:
        return (255, 255, 255)
def get_color_coded_rgb(distance: float) -> tuple:
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 255) for i in colorsys.hsv_to_rgb(distance, 1, 1))
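# Color-coding note: hsv_to_rgb maps the distance onto the hue wheel, so for
# example a distance of roughly 1/3 lands on green, i.e. get_color_coded_rgb
# returns (0, 255, 0) there up to floating-point rounding.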
def get_image(
    image_width: int = 800,
    image_height: int = 600,
    figure_center_x: float = -0.6,
    figure_center_y: float = 0,
    figure_width: float = 3.2,
    max_step: int = 50,
    use_distance_color_coding: bool = True,
) -> Image.Image:
    img = Image.new("RGB", (image_width, image_height))
    pixels = img.load()
    # loop through the image-coordinates
    for image_x in range(image_width):
        for image_y in range(image_height):
            # determine the figure-coordinates based on the image-coordinates
            figure_height = figure_width / image_width * image_height
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height
            distance = get_distance(figure_x, figure_y, max_step)
            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance)
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance)
    return img
if __name__ == "__main__":
import doctest
doctest.testmod()
# colored version, full figure
    img = get_image()
# uncomment for colored version, different section, zoomed in
# img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
# figure_width = 0.8)
# uncomment for black and white version, full figure
# img = get_image(use_distance_color_coding = False)
# uncomment to save the image
# img.save("mandelbrot.png")
img.show()
| 708
|
from timeit import timeit
def get_set_bits_count_using_brian_kernighans_algorithm(number: int) -> int:
    if number < 0:
        raise ValueError("the value of input must not be negative")
    result = 0
    while number:
        # each iteration clears the lowest set bit
        number &= number - 1
        result += 1
    return result
def get_set_bits_count_using_modulo_operator(number: int) -> int:
    if number < 0:
        raise ValueError("the value of input must not be negative")
    result = 0
    while number:
        if number % 2 == 1:
            result += 1
        number >>= 1
    return result
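# Worked example: 25 is 0b11001, so both functions above return 3. Brian
# Kernighan's update `number &= number - 1` clears the lowest set bit per
# iteration (0b11001 -> 0b11000 -> 0b10000 -> 0b00000), so it loops once per
# set bit instead of once per bit position.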
def benchmark() -> None:
    def do_benchmark(number: int) -> None:
        setup = "import __main__ as z"
        print(f"Benchmark when {number = }:")
        print(f"{get_set_bits_count_using_modulo_operator(number) = }")
        timing = timeit(
            f"z.get_set_bits_count_using_modulo_operator({number})", setup=setup
        )
        print(f"timeit() runs in {timing} seconds")
        print(f"{get_set_bits_count_using_brian_kernighans_algorithm(number) = }")
        timing = timeit(
            f"z.get_set_bits_count_using_brian_kernighans_algorithm({number})", setup=setup
        )
        print(f"timeit() runs in {timing} seconds")

    for number in (25, 37, 58, 0):
        do_benchmark(number)
        print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 86
| 0
|
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
def squared_euclidean_distance(a, b):
    # pairwise ||a - b||**2 via the expansion ||a||**2 - 2 * a.b + ||b||**2
    b = b.T
    a2 = np.sum(np.square(a), axis=1)
    b2 = np.sum(np.square(b), axis=0)
    ab = np.matmul(a, b)
    d = a2[:, None] - 2 * ab + b2[None, :]
    return d
def color_quantize(x, clusters):
    x = x.reshape(-1, 3)
    d = squared_euclidean_distance(x, clusters)
    return np.argmin(d, axis=1)
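# Minimal sketch of the intended behaviour (hypothetical values): with
# clusters = np.array([[0, 0, 0], [255, 255, 255]]) and an image x of shape
# (H, W, 3), color_quantize(x, clusters) returns H * W indices in {0, 1},
# assigning every pixel to its nearest cluster color.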
class snake_case_ ( a_ ):
__lowerCAmelCase = ["pixel_values"]
def __init__( self , a_ = None , a_ = True , a_ = None , a_ = PILImageResampling.BILINEAR , a_ = True , a_ = True , **a_ , ):
super().__init__(**a_ )
a_ : Optional[int] = size if size is not None else {"height": 2_5_6, "width": 2_5_6}
a_ : Optional[Any] = get_size_dict(a_ )
a_ : Tuple = np.array(a_ ) if clusters is not None else None
a_ : str = do_resize
a_ : str = size
a_ : Optional[Any] = resample
a_ : Any = do_normalize
a_ : Tuple = do_color_quantize
def snake_case_ ( self , a_ , a_ , a_ = PILImageResampling.BILINEAR , a_ = None , **a_ , ):
a_ : str = get_size_dict(a_ )
if "height" not in size or "width" not in size:
raise ValueError(F"""Size dictionary must contain both height and width keys. Got {size.keys()}""" )
return resize(
a_ , size=(size["height"], size["width"]) , resample=a_ , data_format=a_ , **a_ )
def snake_case_ ( self , a_ , a_ = None , ):
a_ : List[str] = rescale(image=a_ , scale=1 / 127.5 , data_format=a_ )
a_ : Any = image - 1
return image
def snake_case_ ( self , a_ , a_ = None , a_ = None , a_ = None , a_ = None , a_ = None , a_ = None , a_ = None , a_ = ChannelDimension.FIRST , **a_ , ):
a_ : Union[str, Any] = do_resize if do_resize is not None else self.do_resize
a_ : Tuple = size if size is not None else self.size
a_ : List[str] = get_size_dict(a_ )
a_ : Tuple = resample if resample is not None else self.resample
a_ : Any = do_normalize if do_normalize is not None else self.do_normalize
a_ : Optional[int] = do_color_quantize if do_color_quantize is not None else self.do_color_quantize
a_ : Union[str, Any] = clusters if clusters is not None else self.clusters
a_ : Dict = np.array(a_ )
a_ : Tuple = make_list_of_images(a_ )
if not valid_images(a_ ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
        if do_resize and (size is None or resample is None):
raise ValueError("Size and resample must be specified if do_resize is True." )
if do_color_quantize and clusters is None:
raise ValueError("Clusters must be specified if do_color_quantize is True." )
# All transformations expect numpy arrays.
a_ : Union[str, Any] = [to_numpy_array(a_ ) for image in images]
if do_resize:
a_ : int = [self.resize(image=a_ , size=a_ , resample=a_ ) for image in images]
if do_normalize:
a_ : Optional[int] = [self.normalize(image=a_ ) for image in images]
if do_color_quantize:
a_ : Optional[Any] = [to_channel_dimension_format(a_ , ChannelDimension.LAST ) for image in images]
# color quantize from (batch_size, height, width, 3) to (batch_size, height, width)
a_ : int = np.array(a_ )
a_ : str = color_quantize(a_ , a_ ).reshape(images.shape[:-1] )
# flatten to (batch_size, height*width)
a_ : Tuple = images.shape[0]
a_ : Dict = images.reshape(a_ , -1 )
# We need to convert back to a list of images to keep consistent behaviour across processors.
a_ : Dict = list(a_ )
else:
a_ : Dict = [to_channel_dimension_format(a_ , a_ ) for image in images]
a_ : int = {"input_ids": images}
return BatchFeature(data=a_ , tensor_type=a_ )
| 237
|
"""simple docstring"""
def find_minimum_change(denominations: list[int], value: str) -> list[int]:
    total_value = int(value)
    # Initialize Result
    answer = []
    # Traverse through all denominations
    for denomination in reversed(denominations):
        # Find denominations
        while int(total_value) >= int(denomination):
            total_value -= int(denomination)
            answer.append(denomination)  # Append the "answers" array
    return answer
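# Caveat: this greedy strategy is only guaranteed to be optimal for canonical
# coin systems such as the Indian denominations used below; for a system like
# [1, 3, 4] and value 6 it yields [4, 1, 1] rather than the optimal [3, 3].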
# Driver Code
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ = []
SCREAMING_SNAKE_CASE_ = """0"""
if (
input("""Do you want to enter your denominations ? (yY/n): """).strip().lower()
== "y"
):
SCREAMING_SNAKE_CASE_ = int(input("""Enter the number of denominations you want to add: """).strip())
for i in range(0, n):
denominations.append(int(input(F"""Denomination {i}: """).strip()))
SCREAMING_SNAKE_CASE_ = input("""Enter the change you want to make in Indian Currency: """).strip()
else:
# All denominations of Indian Currency if user does not enter
SCREAMING_SNAKE_CASE_ = [1, 2, 5, 10, 20, 50, 1_00, 5_00, 20_00]
SCREAMING_SNAKE_CASE_ = input("""Enter the change you want to make: """).strip()
if int(value) == 0 or int(value) < 0:
print("""The total value cannot be zero or negative.""")
else:
print(F"""Following is minimal change for {value}: """)
SCREAMING_SNAKE_CASE_ = find_minimum_change(denominations, value)
# Print result
for i in range(len(answer)):
print(answer[i], end=""" """)
| 237
| 1
|
'''simple docstring'''
from argparse import ArgumentParser
from . import BaseTransformersCLICommand
def UpperCAmelCase ( lowerCamelCase_ :Union[str, Any] ):
'''simple docstring'''
return DownloadCommand(args.model , args.cache_dir , args.force , args.trust_remote_code )
class __UpperCamelCase ( lowercase__ ):
@staticmethod
def a__ ( _UpperCamelCase :ArgumentParser ):
        download_parser = parser.add_parser("""download""")
download_parser.add_argument(
"""--cache-dir""" ,type=_UpperCamelCase ,default=_UpperCamelCase ,help="""Path to location to store the models""" )
download_parser.add_argument(
"""--force""" ,action="""store_true""" ,help="""Force the model to be download even if already in cache-dir""" )
download_parser.add_argument(
"""--trust-remote-code""" ,action="""store_true""" ,help="""Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you've reviewed the code as it will execute on your local machine""" ,)
download_parser.add_argument("""model""" ,type=_UpperCamelCase ,help="""Name of the model to download""" )
download_parser.set_defaults(func=_UpperCamelCase )
def __init__( self :List[Any] ,_UpperCamelCase :str ,_UpperCamelCase :str ,_UpperCamelCase :bool ,_UpperCamelCase :bool ):
        self._model = model
        self._cache = cache
        self._force = force
        self._trust_remote_code = trust_remote_code
def a__ ( self :Dict ):
from ..models.auto import AutoModel, AutoTokenizer
AutoModel.from_pretrained(
self._model ,cache_dir=self._cache ,force_download=self._force ,trust_remote_code=self._trust_remote_code )
AutoTokenizer.from_pretrained(
self._model ,cache_dir=self._cache ,force_download=self._force ,trust_remote_code=self._trust_remote_code )
| 267
|
'''simple docstring'''
# Copyright (c) 2021-, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
####################################################################################################
#
# Note: If when running this conversion script you're getting an exception:
# ModuleNotFoundError: No module named 'megatron.model.enums'
# you need to tell python where to find the clone of Megatron-LM, e.g.:
#
# cd /tmp
# git clone https://github.com/NVIDIA/Megatron-LM
# PYTHONPATH=/tmp/Megatron-LM python src/transformers/models/megatron_gpt2/convert_megatron_gpt2_checkpoint.py ...
#
# if you already have it cloned elsewhere, simply adjust the path to the existing path
#
# If the training was done using a Megatron-LM fork, e.g.,
# https://github.com/microsoft/Megatron-DeepSpeed/ then chances are that you need to have that one
# in your path, i.e., /path/to/Megatron-DeepSpeed/
#
import argparse
import os
import re
import zipfile
import torch
from transformers import AutoTokenizer, GPTaConfig
def recursive_print(name, val, spaces=0):
    # Format the message.
    if name is None:
        msg = None
    else:
        fmt = "." * max(0, spaces - 2) + "# {:" + str(50 - spaces) + "s}"
        msg = fmt.format(name)
    # Print and recurse (if needed).
    if isinstance(val, dict):
        if msg is not None:
            print(msg)
        for k in val.keys():
            recursive_print(k, val[k], spaces + 2)
    elif isinstance(val, torch.Tensor):
        print(msg, ":", val.size())
    else:
        print(msg, ":", val)
def fix_query_key_value_ordering(param, checkpoint_version, num_splits, num_heads, hidden_size):
    # Permutes layout of param tensor to [num_splits * num_heads * hidden_size, :]
    # for compatibility with later versions of NVIDIA Megatron-LM.
    # The inverse operation is performed inside Megatron-LM to read checkpoints:
    # https://github.com/NVIDIA/Megatron-LM/blob/v2.4/megatron/checkpointing.py#L209
    # If param is the weight tensor of the self-attention block, the returned tensor
    # will have to be transposed one more time to be read by HuggingFace GPT2.
    input_shape = param.size()
    if checkpoint_version == 1.0:
        # version 1.0 stores [num_heads * hidden_size * num_splits, :]
        saved_shape = (num_heads, hidden_size, num_splits) + input_shape[1:]
        param = param.view(*saved_shape)
        param = param.transpose(0, 2)
        param = param.transpose(1, 2).contiguous()
    elif checkpoint_version >= 2.0:
        # other versions store [num_heads * num_splits * hidden_size, :]
        saved_shape = (num_heads, num_splits, hidden_size) + input_shape[1:]
        param = param.view(*saved_shape)
        param = param.transpose(0, 1).contiguous()
    param = param.view(*input_shape)
    return param
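# Shape sketch (hypothetical sizes, not taken from any real checkpoint): with
# num_splits=3, num_heads=2, hidden_size=4, a fused QKV weight whose leading
# dimension is 2 * 4 * 3 = 24 in the version-1.0 layout comes out in the
# (num_splits, num_heads, hidden_size) ordering, i.e. the 3 * 2 * 4 = 24 layout
# that later Megatron-LM versions (and, after one more transpose, GPT2) expect.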
def convert_megatron_checkpoint(args, input_state_dict, config):
# The converted output model.
snake_case_ : Tuple = {}
# old versions did not store training args
snake_case_ : Optional[Any] = input_state_dict.get("""args""" , lowerCamelCase_ )
if ds_args is not None:
# do not make the user write a config file when the exact dimensions/sizes are already in the checkpoint
# from pprint import pprint
# pprint(vars(ds_args))
snake_case_ : Optional[int] = ds_args.padded_vocab_size
snake_case_ : str = ds_args.max_position_embeddings
snake_case_ : Tuple = ds_args.hidden_size
snake_case_ : List[str] = ds_args.num_layers
snake_case_ : Union[str, Any] = ds_args.num_attention_heads
snake_case_ : Tuple = ds_args.ffn_hidden_size
# pprint(config)
# The number of heads.
snake_case_ : int = config.n_head
# The hidden_size per head.
snake_case_ : Any = config.n_embd // config.n_head
# Megatron-LM checkpoint version
if "checkpoint_version" in input_state_dict.keys():
snake_case_ : Tuple = input_state_dict["""checkpoint_version"""]
else:
snake_case_ : Dict = 0.0
# The model.
snake_case_ : Optional[Any] = input_state_dict["""model"""]
# The language model.
snake_case_ : Optional[Any] = model["""language_model"""]
# The embeddings.
snake_case_ : int = lm["""embedding"""]
# The word embeddings.
snake_case_ : Any = embeddings["""word_embeddings"""]["""weight"""]
# Truncate the embedding table to vocab_size rows.
snake_case_ : Any = word_embeddings[: config.vocab_size, :]
snake_case_ : Union[str, Any] = word_embeddings
# The position embeddings.
snake_case_ : Tuple = embeddings["""position_embeddings"""]["""weight"""]
# Read the causal mask dimension (seqlen). [max_sequence_length, hidden_size]
snake_case_ : List[str] = pos_embeddings.size(0 )
if n_positions != config.n_positions:
raise ValueError(
F'''pos_embeddings.max_sequence_length={n_positions} and config.n_positions={config.n_positions} don\'t match''' )
# Store the position embeddings.
snake_case_ : List[str] = pos_embeddings
# The transformer.
snake_case_ : str = lm["""transformer"""] if """transformer""" in lm.keys() else lm["""encoder"""]
# The regex to extract layer names.
snake_case_ : Union[str, Any] = re.compile(R"""layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)""" )
# The simple map of names for "automated" rules.
snake_case_ : Tuple = {
"""attention.dense""": """.attn.c_proj.""",
"""self_attention.dense""": """.attn.c_proj.""",
"""mlp.dense_h_to_4h""": """.mlp.c_fc.""",
"""mlp.dense_4h_to_h""": """.mlp.c_proj.""",
}
# Extract the layers.
for key, val in transformer.items():
# Match the name.
snake_case_ : List[Any] = layer_re.match(lowerCamelCase_ )
# Stop if that's not a layer
if m is None:
break
# The index of the layer.
snake_case_ : Any = int(m.group(1 ) )
# The name of the operation.
snake_case_ : Any = m.group(2 )
# Is it a weight or a bias?
snake_case_ : List[str] = m.group(3 )
# The name of the layer.
snake_case_ : List[Any] = F'''transformer.h.{layer_idx}'''
# For layernorm(s), simply store the layer norm.
if op_name.endswith("""layernorm""" ):
snake_case_ : str = """ln_1""" if op_name.startswith("""input""" ) else """ln_2"""
snake_case_ : Optional[int] = val
# Transpose the QKV matrix.
elif (
op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
) and weight_or_bias == "weight":
# Insert a tensor of 1x1xDxD bias.
snake_case_ : int = torch.tril(torch.ones((n_positions, n_positions) , dtype=torch.floataa ) ).view(
1 , 1 , lowerCamelCase_ , lowerCamelCase_ )
snake_case_ : Any = causal_mask
# Insert a "dummy" tensor for masked_bias.
snake_case_ : List[str] = torch.tensor(-1E4 , dtype=torch.floataa )
snake_case_ : Optional[Any] = masked_bias
snake_case_ : Optional[Any] = fix_query_key_value_ordering(lowerCamelCase_ , lowerCamelCase_ , 3 , lowerCamelCase_ , lowerCamelCase_ )
# Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D.
snake_case_ : Any = out_val.transpose(0 , 1 ).contiguous()
# Store.
snake_case_ : int = out_val
# Transpose the bias.
elif (
op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
) and weight_or_bias == "bias":
snake_case_ : Dict = fix_query_key_value_ordering(lowerCamelCase_ , lowerCamelCase_ , 3 , lowerCamelCase_ , lowerCamelCase_ )
# Store. No change of shape.
snake_case_ : Tuple = out_val
# Transpose the weights.
elif weight_or_bias == "weight":
snake_case_ : str = megatron_to_transformers[op_name]
snake_case_ : Union[str, Any] = val.transpose(0 , 1 )
# Copy the bias.
elif weight_or_bias == "bias":
snake_case_ : str = megatron_to_transformers[op_name]
snake_case_ : List[str] = val
# DEBUG.
assert config.n_layer == layer_idx + 1
# The final layernorm.
snake_case_ : str = transformer["""final_layernorm.weight"""]
snake_case_ : int = transformer["""final_layernorm.bias"""]
# For LM head, transformers' wants the matrix to weight embeddings.
snake_case_ : Optional[int] = word_embeddings
# It should be done!
return output_state_dict
def main():
# Create the argument parser.
snake_case_ : str = argparse.ArgumentParser()
parser.add_argument("""--print-checkpoint-structure""" , action="""store_true""" )
parser.add_argument(
"""path_to_checkpoint""" , type=lowerCamelCase_ , help="""Path to the checkpoint file (.zip archive or direct .pt file)""" , )
parser.add_argument(
"""--config_file""" , default="""""" , type=lowerCamelCase_ , help="""An optional config json file describing the pre-trained model.""" , )
snake_case_ : Tuple = parser.parse_args()
# Extract the basename.
snake_case_ : List[str] = os.path.dirname(args.path_to_checkpoint )
# Load the model.
# the .zip is very optional, let's keep it for backward compatibility
print(F'''Extracting PyTorch state dictionary from {args.path_to_checkpoint}''' )
if args.path_to_checkpoint.endswith(""".zip""" ):
with zipfile.ZipFile(args.path_to_checkpoint , """r""" ) as checkpoint:
with checkpoint.open("""release/mp_rank_00/model_optim_rng.pt""" ) as pytorch_dict:
snake_case_ : Dict = torch.load(lowerCamelCase_ , map_location="""cpu""" )
else:
snake_case_ : Optional[Any] = torch.load(args.path_to_checkpoint , map_location="""cpu""" )
snake_case_ : Optional[int] = input_state_dict.get("""args""" , lowerCamelCase_ )
# Read the config, or default to the model released by NVIDIA.
if args.config_file == "":
if ds_args is not None:
if ds_args.bias_gelu_fusion:
snake_case_ : Tuple = """gelu_fast"""
elif ds_args.openai_gelu:
snake_case_ : int = """gelu_new"""
else:
snake_case_ : Optional[Any] = """gelu"""
else:
# in the very early days this used to be "gelu_new"
snake_case_ : int = """gelu_new"""
# Spell out all parameters in case the defaults change.
snake_case_ : int = GPTaConfig(
vocab_size=5_02_57 , n_positions=10_24 , n_embd=10_24 , n_layer=24 , n_head=16 , n_inner=40_96 , activation_function=lowerCamelCase_ , resid_pdrop=0.1 , embd_pdrop=0.1 , attn_pdrop=0.1 , layer_norm_epsilon=1E-5 , initializer_range=0.02 , summary_type="""cls_index""" , summary_use_proj=lowerCamelCase_ , summary_activation=lowerCamelCase_ , summary_proj_to_labels=lowerCamelCase_ , summary_first_dropout=0.1 , scale_attn_weights=lowerCamelCase_ , use_cache=lowerCamelCase_ , bos_token_id=5_02_56 , eos_token_id=5_02_56 , )
else:
snake_case_ : int = GPTaConfig.from_json_file(args.config_file )
snake_case_ : Optional[int] = ["""GPT2LMHeadModel"""]
# Convert.
print("""Converting""" )
snake_case_ : Optional[Any] = convert_megatron_checkpoint(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
# Print the structure of converted state dict.
if args.print_checkpoint_structure:
recursive_print(lowerCamelCase_ , lowerCamelCase_ )
# Add tokenizer class info to config
# see https://github.com/huggingface/transformers/issues/13906)
if ds_args is not None:
snake_case_ : str = ds_args.tokenizer_type
if tokenizer_type == "GPT2BPETokenizer":
snake_case_ : List[str] = """gpt2"""
elif tokenizer_type == "PretrainedFromHF":
snake_case_ : str = ds_args.tokenizer_name_or_path
else:
raise ValueError(F'''Unrecognized tokenizer_type {tokenizer_type}''' )
else:
snake_case_ : List[str] = """gpt2"""
snake_case_ : Union[str, Any] = AutoTokenizer.from_pretrained(lowerCamelCase_ )
snake_case_ : int = type(lowerCamelCase_ ).__name__
snake_case_ : Union[str, Any] = tokenizer_class
# Store the config to file.
print("""Saving config""" )
config.save_pretrained(lowerCamelCase_ )
# Save tokenizer based on args
print(F'''Adding {tokenizer_class} tokenizer files''' )
tokenizer.save_pretrained(lowerCamelCase_ )
# Store the state_dict to file.
snake_case_ : Optional[Any] = os.path.join(lowerCamelCase_ , """pytorch_model.bin""" )
print(F'''Saving checkpoint to "{output_checkpoint_file}"''' )
torch.save(lowerCamelCase_ , lowerCamelCase_ )
####################################################################################################
if __name__ == "__main__":
main()
####################################################################################################
| 267
| 1
|
from math import isqrt, log2
def calculate_prime_numbers(max_number: int) -> list[int]:
    # sieve of Eratosthenes
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            for j in range(i**2, max_number, i):
                is_prime[j] = False
    return [i for i in range(2, max_number) if is_prime[i]]
def solution(base: int = 800_800, degree: int = 800_800) -> int:
    upper_bound = degree * log2(base)
    max_prime = int(upper_bound)
    prime_numbers = calculate_prime_numbers(max_prime)

    hybrid_integers_count = 0
    left = 0
    right = len(prime_numbers) - 1
    while left < right:
        while (
            prime_numbers[right] * log2(prime_numbers[left])
            + prime_numbers[left] * log2(prime_numbers[right])
            > upper_bound
        ):
            right -= 1
        hybrid_integers_count += right - left
        left += 1
    return hybrid_integers_count
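# Why the two-pointer scan above works: p**q * q**p <= base**degree is,
# after taking log2 of both sides, equivalent to
# q * log2(p) + p * log2(q) <= degree * log2(base), which is exactly the
# comparison against upper_bound.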
if __name__ == "__main__":
print(f"""{solution() = }""")
| 73
|
"""simple docstring"""
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .tokenization_wavaveca import WavaVecaCTCTokenizer
class _snake_case ( __snake_case ):
"""simple docstring"""
a = "Wav2Vec2FeatureExtractor"
a = "AutoTokenizer"
def __init__( self : Union[str, Any] , _A : Union[str, Any] , _A : int):
"""simple docstring"""
super().__init__(_A , _A)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
@classmethod
def _lowerCAmelCase ( cls : Tuple , _A : Optional[Any] , **_A : Any):
"""simple docstring"""
try:
return super().from_pretrained(_A , **_A)
except OSError:
warnings.warn(
f"""Loading a tokenizer inside {cls.__name__} from a config that does not"""
""" include a `tokenizer_class` attribute is deprecated and will be """
"""removed in v5. Please add `'tokenizer_class': 'Wav2Vec2CTCTokenizer'`"""
""" attribute to either your `config.json` or `tokenizer_config.json` """
"""file to suppress this warning: """ , _A , )
            feature_extractor = WavaVecaFeatureExtractor.from_pretrained(_A , **_A)
            tokenizer = WavaVecaCTCTokenizer.from_pretrained(_A , **_A)
            return cls(feature_extractor=feature_extractor , tokenizer=tokenizer)
def __call__( self : str , *_A : Union[str, Any] , **_A : Optional[int]):
"""simple docstring"""
if self._in_target_context_manager:
return self.current_processor(*_A , **_A)
if "raw_speech" in kwargs:
warnings.warn("""Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.""")
_SCREAMING_SNAKE_CASE : List[str] = kwargs.pop("""raw_speech""")
else:
_SCREAMING_SNAKE_CASE : Any = kwargs.pop("""audio""" , _A)
_SCREAMING_SNAKE_CASE : Union[str, Any] = kwargs.pop("""sampling_rate""" , _A)
_SCREAMING_SNAKE_CASE : Optional[int] = kwargs.pop("""text""" , _A)
if len(_A) > 0:
_SCREAMING_SNAKE_CASE : Any = args[0]
_SCREAMING_SNAKE_CASE : int = args[1:]
if audio is None and text is None:
raise ValueError("""You need to specify either an `audio` or `text` input to process.""")
if audio is not None:
_SCREAMING_SNAKE_CASE : Any = self.feature_extractor(_A , *_A , sampling_rate=_A , **_A)
if text is not None:
_SCREAMING_SNAKE_CASE : int = self.tokenizer(_A , **_A)
if text is None:
return inputs
elif audio is None:
return encodings
else:
            inputs["labels"] = encodings["input_ids"]
return inputs
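    # Hedged usage sketch: processor(audio=waveform, sampling_rate=16_000,
    # text="transcript") returns the feature extractor's inputs with the
    # tokenized transcript attached under the "labels" key, as merged above.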
def _lowerCAmelCase ( self : int , *_A : Optional[int] , **_A : List[Any]):
"""simple docstring"""
if self._in_target_context_manager:
return self.current_processor.pad(*_A , **_A)
_SCREAMING_SNAKE_CASE : Tuple = kwargs.pop("""input_features""" , _A)
_SCREAMING_SNAKE_CASE : Optional[Any] = kwargs.pop("""labels""" , _A)
if len(_A) > 0:
_SCREAMING_SNAKE_CASE : List[str] = args[0]
_SCREAMING_SNAKE_CASE : Optional[Any] = args[1:]
if input_features is not None:
_SCREAMING_SNAKE_CASE : int = self.feature_extractor.pad(_A , *_A , **_A)
if labels is not None:
_SCREAMING_SNAKE_CASE : List[Any] = self.tokenizer.pad(_A , **_A)
if labels is None:
return input_features
elif input_features is None:
return labels
else:
            input_features["labels"] = labels["input_ids"]
return input_features
def _lowerCAmelCase ( self : str , *_A : Dict , **_A : Tuple):
"""simple docstring"""
return self.tokenizer.batch_decode(*_A , **_A)
def _lowerCAmelCase ( self : Dict , *_A : Optional[Any] , **_A : Optional[int]):
"""simple docstring"""
return self.tokenizer.decode(*_A , **_A)
@contextmanager
def _lowerCAmelCase ( self : List[Any]):
"""simple docstring"""
warnings.warn(
"""`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your """
"""labels by using the argument `text` of the regular `__call__` method (either in the same call as """
"""your audio inputs, or in a separate call.""")
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
| 338
| 0
|
import gc
import math
import unittest
import torch
from diffusers import UNetaDModel
from diffusers.utils import floats_tensor, logging, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
__snake_case :Union[str, Any] =logging.get_logger(__name__)
enable_full_determinism()
class lowerCAmelCase__ ( _lowerCamelCase , _lowerCamelCase , unittest.TestCase ):
A_ : Dict = UNetaDModel
A_ : str = 'sample'
@property
def __UpperCamelCase ( self : Tuple ) -> Any:
A = 4
A = 3
A = (32, 32)
A = floats_tensor((batch_size, num_channels) + sizes ).to(__UpperCamelCase )
A = torch.tensor([10] ).to(__UpperCamelCase )
return {"sample": noise, "timestep": time_step}
@property
def __UpperCamelCase ( self : List[str] ) -> List[str]:
return (3, 32, 32)
@property
def __UpperCamelCase ( self : List[Any] ) -> Any:
return (3, 32, 32)
def __UpperCamelCase ( self : Optional[Any] ) -> Dict:
A = {
'block_out_channels': (32, 64),
'down_block_types': ('DownBlock2D', 'AttnDownBlock2D'),
'up_block_types': ('AttnUpBlock2D', 'UpBlock2D'),
'attention_head_dim': 3,
'out_channels': 3,
'in_channels': 3,
'layers_per_block': 2,
'sample_size': 32,
}
A = self.dummy_input
return init_dict, inputs_dict
class lowerCAmelCase__ ( _lowerCamelCase , _lowerCamelCase , unittest.TestCase ):
A_ : List[str] = UNetaDModel
A_ : List[str] = 'sample'
@property
def __UpperCamelCase ( self : Tuple ) -> List[Any]:
A = 4
A = 4
A = (32, 32)
A = floats_tensor((batch_size, num_channels) + sizes ).to(__UpperCamelCase )
A = torch.tensor([10] ).to(__UpperCamelCase )
return {"sample": noise, "timestep": time_step}
@property
def __UpperCamelCase ( self : List[str] ) -> Tuple:
return (4, 32, 32)
@property
def __UpperCamelCase ( self : List[str] ) -> Any:
return (4, 32, 32)
def __UpperCamelCase ( self : str ) -> List[str]:
A = {
'sample_size': 32,
'in_channels': 4,
'out_channels': 4,
'layers_per_block': 2,
'block_out_channels': (32, 64),
'attention_head_dim': 32,
'down_block_types': ('DownBlock2D', 'DownBlock2D'),
'up_block_types': ('UpBlock2D', 'UpBlock2D'),
}
A = self.dummy_input
return init_dict, inputs_dict
def __UpperCamelCase ( self : Dict ) -> Any:
A , A = UNetaDModel.from_pretrained('fusing/unet-ldm-dummy-update' , output_loading_info=__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
self.assertEqual(len(loading_info['missing_keys'] ) , 0 )
model.to(__UpperCamelCase )
A = model(**self.dummy_input ).sample
assert image is not None, "Make sure output is not None"
@unittest.skipIf(torch_device != 'cuda' , 'This test is supposed to run on GPU' )
def __UpperCamelCase ( self : str ) -> Optional[int]:
A , A = UNetaDModel.from_pretrained('fusing/unet-ldm-dummy-update' , output_loading_info=__UpperCamelCase )
model.to(__UpperCamelCase )
A = model(**self.dummy_input ).sample
assert image is not None, "Make sure output is not None"
@unittest.skipIf(torch_device != 'cuda' , 'This test is supposed to run on GPU' )
def __UpperCamelCase ( self : List[str] ) -> Dict:
        # by default, model loading uses accelerate, i.e. `low_cpu_mem_usage=True`
A , A = UNetaDModel.from_pretrained('fusing/unet-ldm-dummy-update' , output_loading_info=__UpperCamelCase )
model_accelerate.to(__UpperCamelCase )
model_accelerate.eval()
A = torch.randn(
1 , model_accelerate.config.in_channels , model_accelerate.config.sample_size , model_accelerate.config.sample_size , generator=torch.manual_seed(0 ) , )
A = noise.to(__UpperCamelCase )
A = torch.tensor([10] * noise.shape[0] ).to(__UpperCamelCase )
A = model_accelerate(__UpperCamelCase , __UpperCamelCase )['sample']
        # the two models don't need to stay on the device at the same time
del model_accelerate
torch.cuda.empty_cache()
gc.collect()
A , A = UNetaDModel.from_pretrained(
'fusing/unet-ldm-dummy-update' , output_loading_info=__UpperCamelCase , low_cpu_mem_usage=__UpperCamelCase )
model_normal_load.to(__UpperCamelCase )
model_normal_load.eval()
A = model_normal_load(__UpperCamelCase , __UpperCamelCase )['sample']
assert torch_all_close(__UpperCamelCase , __UpperCamelCase , rtol=1e-3 )
def __UpperCamelCase ( self : List[str] ) -> int:
A = UNetaDModel.from_pretrained('fusing/unet-ldm-dummy-update' )
model.eval()
model.to(__UpperCamelCase )
A = torch.randn(
1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , )
A = noise.to(__UpperCamelCase )
A = torch.tensor([10] * noise.shape[0] ).to(__UpperCamelCase )
with torch.no_grad():
A = model(__UpperCamelCase , __UpperCamelCase ).sample
A = output[0, -1, -3:, -3:].flatten().cpu()
# fmt: off
A = torch.tensor([-1_3.3_2_5_8, -2_0.1_1_0_0, -1_5.9_8_7_3, -1_7.6_6_1_7, -2_3.0_5_9_6, -1_7.9_4_1_9, -1_3.3_6_7_5, -1_6.1_8_8_9, -1_2.3_8_0_0] )
# fmt: on
self.assertTrue(torch_all_close(__UpperCamelCase , __UpperCamelCase , rtol=1e-3 ) )
class lowerCAmelCase__ ( _lowerCamelCase , _lowerCamelCase , unittest.TestCase ):
A_ : Optional[int] = UNetaDModel
A_ : Optional[int] = 'sample'
@property
def __UpperCamelCase ( self : Union[str, Any] , __UpperCamelCase : int=(32, 32) ) -> Optional[int]:
A = 4
A = 3
A = floats_tensor((batch_size, num_channels) + sizes ).to(__UpperCamelCase )
A = torch.tensor(batch_size * [10] ).to(dtype=torch.intaa , device=__UpperCamelCase )
return {"sample": noise, "timestep": time_step}
@property
def __UpperCamelCase ( self : str ) -> Any:
return (3, 32, 32)
@property
def __UpperCamelCase ( self : int ) -> str:
return (3, 32, 32)
def __UpperCamelCase ( self : Any ) -> str:
A = {
'block_out_channels': [32, 64, 64, 64],
'in_channels': 3,
'layers_per_block': 1,
'out_channels': 3,
'time_embedding_type': 'fourier',
'norm_eps': 1e-6,
'mid_block_scale_factor': math.sqrt(2.0 ),
'norm_num_groups': None,
'down_block_types': [
'SkipDownBlock2D',
'AttnSkipDownBlock2D',
'SkipDownBlock2D',
'SkipDownBlock2D',
],
'up_block_types': [
'SkipUpBlock2D',
'SkipUpBlock2D',
'AttnSkipUpBlock2D',
'SkipUpBlock2D',
],
}
A = self.dummy_input
return init_dict, inputs_dict
@slow
def __UpperCamelCase ( self : Dict ) -> Union[str, Any]:
A , A = UNetaDModel.from_pretrained('google/ncsnpp-celebahq-256' , output_loading_info=__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
self.assertEqual(len(loading_info['missing_keys'] ) , 0 )
model.to(__UpperCamelCase )
A = self.dummy_input
A = floats_tensor((4, 3) + (256, 256) ).to(__UpperCamelCase )
A = noise
A = model(**__UpperCamelCase )
assert image is not None, "Make sure output is not None"
@slow
def __UpperCamelCase ( self : Tuple ) -> int:
A = UNetaDModel.from_pretrained('google/ncsnpp-celebahq-256' )
model.to(__UpperCamelCase )
A = 4
A = 3
A = (256, 256)
A = torch.ones((batch_size, num_channels) + sizes ).to(__UpperCamelCase )
A = torch.tensor(batch_size * [1e-4] ).to(__UpperCamelCase )
with torch.no_grad():
A = model(__UpperCamelCase , __UpperCamelCase ).sample
A = output[0, -3:, -3:, -1].flatten().cpu()
# fmt: off
A = torch.tensor([-4_8_4_2.8_6_9_1, -6_4_9_9.6_6_3_1, -3_8_0_0.1_9_5_3, -7_9_7_8.2_6_8_6, -1_0_9_8_0.7_1_2_9, -2_0_0_2_8.8_5_3_5, 8_1_4_8.2_8_2_2, 2_3_4_2.2_9_0_5, 5_6_7.7_6_0_8] )
# fmt: on
self.assertTrue(torch_all_close(__UpperCamelCase , __UpperCamelCase , rtol=1e-2 ) )
def __UpperCamelCase ( self : Optional[int] ) -> Union[str, Any]:
A = UNetaDModel.from_pretrained('fusing/ncsnpp-ffhq-ve-dummy-update' )
model.to(__UpperCamelCase )
A = 4
A = 3
A = (32, 32)
A = torch.ones((batch_size, num_channels) + sizes ).to(__UpperCamelCase )
A = torch.tensor(batch_size * [1e-4] ).to(__UpperCamelCase )
with torch.no_grad():
A = model(__UpperCamelCase , __UpperCamelCase ).sample
A = output[0, -3:, -3:, -1].flatten().cpu()
# fmt: off
A = torch.tensor([-0.0_3_2_5, -0.0_9_0_0, -0.0_8_6_9, -0.0_3_3_2, -0.0_7_2_5, -0.0_2_7_0, -0.0_1_0_1, 0.0_2_2_7, 0.0_2_5_6] )
# fmt: on
self.assertTrue(torch_all_close(__UpperCamelCase , __UpperCamelCase , rtol=1e-2 ) )
def __UpperCamelCase ( self : str ) -> Optional[int]:
# not required for this model
pass
| 224
|
class lowerCAmelCase__ :
def __init__( self : str , __UpperCamelCase : str = "" , __UpperCamelCase : bool = False ) -> None:
# Mapping from the first character of the prefix of the node
A = {}
# A node will be a leaf if the tree contains its word
A = is_leaf
A = prefix
def __UpperCamelCase ( self : int , __UpperCamelCase : str ) -> tuple[str, str, str]:
A = 0
for q, w in zip(self.prefix , __UpperCamelCase ):
if q != w:
break
x += 1
return self.prefix[:x], self.prefix[x:], word[x:]
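    # Example: a node with prefix "banana" matched against the word "bandana"
    # returns ("ban", "ana", "dana"): the shared prefix, the unmatched rest of
    # the node's prefix, and the unmatched rest of the word.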
def __UpperCamelCase ( self : int , __UpperCamelCase : list[str] ) -> None:
for word in words:
self.insert(__UpperCamelCase )
def __UpperCamelCase ( self : Tuple , __UpperCamelCase : str ) -> None:
# Case 1: If the word is the prefix of the node
# Solution: We set the current node as leaf
if self.prefix == word:
A = True
# Case 2: The node has no edges that have a prefix to the word
# Solution: We create an edge from the current node to a new one
# containing the word
elif word[0] not in self.nodes:
A = RadixNode(prefix=__UpperCamelCase , is_leaf=__UpperCamelCase )
else:
A = self.nodes[word[0]]
            matching_string, remaining_prefix, remaining_word = incoming_node.match(__UpperCamelCase)
# Case 3: The node prefix is equal to the matching
# Solution: We insert remaining word on the next node
if remaining_prefix == "":
self.nodes[matching_string[0]].insert(__UpperCamelCase )
# Case 4: The word is greater equal to the matching
# Solution: Create a node in between both nodes, change
# prefixes and add the new node for the remaining word
else:
A = remaining_prefix
A = self.nodes[matching_string[0]]
A = RadixNode(__UpperCamelCase , __UpperCamelCase )
A = aux_node
if remaining_word == "":
A = True
else:
self.nodes[matching_string[0]].insert(__UpperCamelCase )
def __UpperCamelCase ( self : int , __UpperCamelCase : str ) -> bool:
A = self.nodes.get(word[0] , __UpperCamelCase )
if not incoming_node:
return False
else:
            matching_string, remaining_prefix, remaining_word = incoming_node.match(__UpperCamelCase)
# If there is remaining prefix, the word can't be on the tree
if remaining_prefix != "":
return False
# This applies when the word and the prefix are equal
elif remaining_word == "":
return incoming_node.is_leaf
# We have word remaining so we check the next node
else:
return incoming_node.find(__UpperCamelCase )
def __UpperCamelCase ( self : Union[str, Any] , __UpperCamelCase : str ) -> bool:
A = self.nodes.get(word[0] , __UpperCamelCase )
if not incoming_node:
return False
else:
            matching_string, remaining_prefix, remaining_word = incoming_node.match(__UpperCamelCase)
# If there is remaining prefix, the word can't be on the tree
if remaining_prefix != "":
return False
# We have word remaining so we check the next node
elif remaining_word != "":
return incoming_node.delete(__UpperCamelCase )
else:
# If it is not a leaf, we don't have to delete
if not incoming_node.is_leaf:
return False
else:
# We delete the nodes if no edges go from it
if len(incoming_node.nodes ) == 0:
del self.nodes[word[0]]
# We merge the current node with its only child
if len(self.nodes ) == 1 and not self.is_leaf:
A = list(self.nodes.values() )[0]
A = merging_node.is_leaf
self.prefix += merging_node.prefix
A = merging_node.nodes
# If there is more than 1 edge, we just mark it as non-leaf
elif len(incoming_node.nodes ) > 1:
A = False
# If there is 1 edge, we merge it with its child
else:
A = list(incoming_node.nodes.values() )[0]
A = merging_node.is_leaf
incoming_node.prefix += merging_node.prefix
A = merging_node.nodes
return True
def __UpperCamelCase ( self : Union[str, Any] , __UpperCamelCase : int = 0 ) -> None:
if self.prefix != "":
print('-' * height , self.prefix , ' (leaf)' if self.is_leaf else '' )
for value in self.nodes.values():
value.print_tree(height + 1 )
def lowerCamelCase_ ( ) -> bool:
'''simple docstring'''
A = 'banana bananas bandana band apple all beast'.split()
A = RadixNode()
root.insert_many(lowerCAmelCase__ )
assert all(root.find(lowerCAmelCase__ ) for word in words )
assert not root.find('bandanas' )
assert not root.find('apps' )
root.delete('all' )
assert not root.find('all' )
root.delete('banana' )
assert not root.find('banana' )
assert root.find('bananas' )
return True
def lowerCamelCase_ ( ) -> None:
'''simple docstring'''
assert test_trie()
def lowerCamelCase_ ( ) -> None:
'''simple docstring'''
A = RadixNode()
A = 'banana bananas bandanas bandana band apple all beast'.split()
root.insert_many(lowerCAmelCase__ )
print('Words:' , lowerCAmelCase__ )
print('Tree:' )
root.print_tree()
if __name__ == "__main__":
main()
| 224
| 1
|
import multiprocessing
import os
from typing import BinaryIO, Optional, Union
import fsspec
from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class _lowerCamelCase ( UpperCamelCase_ ):
def __init__( self , lowerCAmelCase , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = False , lowerCAmelCase = False , lowerCAmelCase = None , lowerCAmelCase = None , **lowerCAmelCase , ) -> int:
super().__init__(
lowerCAmelCase , split=lowerCAmelCase , features=lowerCAmelCase , cache_dir=lowerCAmelCase , keep_in_memory=lowerCAmelCase , streaming=lowerCAmelCase , num_proc=lowerCAmelCase , **lowerCAmelCase , )
SCREAMING_SNAKE_CASE__: str= field
SCREAMING_SNAKE_CASE__: Optional[int]= path_or_paths if isinstance(lowerCAmelCase , lowerCAmelCase ) else {self.split: path_or_paths}
SCREAMING_SNAKE_CASE__: Optional[Any]= Json(
cache_dir=lowerCAmelCase , data_files=lowerCAmelCase , features=lowerCAmelCase , field=lowerCAmelCase , **lowerCAmelCase , )
def UpperCamelCase_ ( self ) -> Dict:
# Build iterable dataset
if self.streaming:
SCREAMING_SNAKE_CASE__: Dict= self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
SCREAMING_SNAKE_CASE__: str= None
SCREAMING_SNAKE_CASE__: Any= None
SCREAMING_SNAKE_CASE__: Optional[Any]= None
SCREAMING_SNAKE_CASE__: Any= None
self.builder.download_and_prepare(
download_config=lowerCAmelCase , download_mode=lowerCAmelCase , verification_mode=lowerCAmelCase , base_path=lowerCAmelCase , num_proc=self.num_proc , )
SCREAMING_SNAKE_CASE__: Optional[int]= self.builder.as_dataset(
split=self.split , verification_mode=lowerCAmelCase , in_memory=self.keep_in_memory )
return dataset
class _lowerCamelCase :
def __init__( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = None , lowerCAmelCase = None , **lowerCAmelCase , ) -> Tuple:
if num_proc is not None and num_proc <= 0:
raise ValueError(f'num_proc {num_proc} must be an integer > 0.' )
SCREAMING_SNAKE_CASE__: List[str]= dataset
SCREAMING_SNAKE_CASE__: int= path_or_buf
SCREAMING_SNAKE_CASE__: Dict= batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
SCREAMING_SNAKE_CASE__: List[Any]= num_proc
SCREAMING_SNAKE_CASE__: int= '''utf-8'''
SCREAMING_SNAKE_CASE__: Union[str, Any]= to_json_kwargs
def UpperCamelCase_ ( self ) -> int:
SCREAMING_SNAKE_CASE__: str= self.to_json_kwargs.pop('''path_or_buf''' , lowerCAmelCase )
SCREAMING_SNAKE_CASE__: Optional[Any]= self.to_json_kwargs.pop('''orient''' , '''records''' )
SCREAMING_SNAKE_CASE__: Union[str, Any]= self.to_json_kwargs.pop('''lines''' , True if orient == '''records''' else False )
SCREAMING_SNAKE_CASE__: int= self.to_json_kwargs.pop('''index''' , False if orient in ['''split''', '''table'''] else True )
SCREAMING_SNAKE_CASE__: Dict= self.to_json_kwargs.pop('''compression''' , lowerCAmelCase )
if compression not in [None, "infer", "gzip", "bz2", "xz"]:
raise NotImplementedError(f'`datasets` currently does not support {compression} compression' )
if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ):
with fsspec.open(self.path_or_buf , '''wb''' , compression=lowerCAmelCase ) as buffer:
SCREAMING_SNAKE_CASE__: Optional[Any]= self._write(file_obj=lowerCAmelCase , orient=lowerCAmelCase , lines=lowerCAmelCase , index=lowerCAmelCase , **self.to_json_kwargs )
else:
if compression:
raise NotImplementedError(
f'The compression parameter is not supported when writing to a buffer, but compression={compression}'
''' was passed. Please provide a local path instead.''' )
SCREAMING_SNAKE_CASE__: str= self._write(
file_obj=self.path_or_buf , orient=lowerCAmelCase , lines=lowerCAmelCase , index=lowerCAmelCase , **self.to_json_kwargs )
return written
def UpperCamelCase_ ( self , lowerCAmelCase ) -> int:
        offset, orient, lines, index, to_json_kwargs = lowerCAmelCase
SCREAMING_SNAKE_CASE__: Union[str, Any]= query_table(
table=self.dataset.data , key=slice(lowerCAmelCase , offset + self.batch_size ) , indices=self.dataset._indices , )
SCREAMING_SNAKE_CASE__: List[str]= batch.to_pandas().to_json(
path_or_buf=lowerCAmelCase , orient=lowerCAmelCase , lines=lowerCAmelCase , index=lowerCAmelCase , **lowerCAmelCase )
if not json_str.endswith('''\n''' ):
json_str += "\n"
return json_str.encode(self.encoding )
def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , **lowerCAmelCase , ) -> int:
SCREAMING_SNAKE_CASE__: Union[str, Any]= 0
if self.num_proc is None or self.num_proc == 1:
for offset in logging.tqdm(
range(0 , len(self.dataset ) , self.batch_size ) , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating json from Arrow format''' , ):
SCREAMING_SNAKE_CASE__: List[str]= self._batch_json((offset, orient, lines, index, to_json_kwargs) )
written += file_obj.write(lowerCAmelCase )
else:
            num_rows, batch_size = len(self.dataset), self.batch_size
with multiprocessing.Pool(self.num_proc ) as pool:
for json_str in logging.tqdm(
pool.imap(
self._batch_json , [(offset, orient, lines, index, to_json_kwargs) for offset in range(0 , lowerCAmelCase , lowerCAmelCase )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating json from Arrow format''' , ):
written += file_obj.write(lowerCAmelCase )
return written
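    # Note: pool.imap (unlike imap_unordered) yields batches in submission
    # order, so the multiprocess path writes JSON lines in the same order as
    # the single-process path.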
| 64
|
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
lowerCamelCase__ : Any = """\
"""
lowerCamelCase__ : List[str] = """
Perplexity (PPL) is one of the most common metrics for evaluating language models.
It is defined as the exponentiated average negative log-likelihood of a sequence.
For more information, see https://huggingface.co/docs/transformers/perplexity
"""
lowerCamelCase__ : Any = """
Args:
model_id (str): model used for calculating Perplexity
NOTE: Perplexity can only be calculated for causal language models.
This includes models such as gpt2, causal variations of bert,
causal versions of t5, and more (the full list can be found
in the AutoModelForCausalLM documentation here:
https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )
input_texts (list of str): input text, each separate text snippet
is one list entry.
batch_size (int): the batch size to run texts through the model. Defaults to 16.
add_start_token (bool): whether to add the start token to the texts,
so the perplexity can include the probability of the first word. Defaults to True.
device (str): device to run on, defaults to 'cuda' when available
Returns:
perplexity: dictionary containing the perplexity scores for the texts
in the input list, as well as the mean perplexity. If one of the input texts is
longer than the max input length of the model, then it is truncated to the
max length for the perplexity computation.
Examples:
Example 1:
>>> perplexity = datasets.load_metric(\"perplexity\")
>>> input_texts = [\"lorem ipsum\", \"Happy Birthday!\", \"Bienvenue\"]
>>> results = perplexity.compute(model_id='gpt2',
... add_start_token=False,
... input_texts=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
['perplexities', 'mean_perplexity']
>>> print(round(results[\"mean_perplexity\"], 2))
78.22
>>> print(round(results[\"perplexities\"][0], 2))
11.11
Example 2:
>>> perplexity = datasets.load_metric(\"perplexity\")
>>> input_texts = datasets.load_dataset(\"wikitext\",
... \"wikitext-2-raw-v1\",
... split=\"test\")[\"text\"][:50] # doctest:+ELLIPSIS
[...]
>>> input_texts = [s for s in input_texts if s!='']
>>> results = perplexity.compute(model_id='gpt2',
... input_texts=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
['perplexities', 'mean_perplexity']
>>> print(round(results[\"mean_perplexity\"], 2))
60.35
>>> print(round(results[\"perplexities\"][0], 2))
81.12
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class Perplexity(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "input_texts": datasets.Value("string"),
                }
            ),
            reference_urls=["https://huggingface.co/docs/transformers/perplexity"],
        )
    def _compute(self, input_texts, model_id, batch_size: int = 16, add_start_token: bool = True, device=None):
        if device is not None:
            assert device in ["gpu", "cpu", "cuda"], "device should be either gpu, cpu or cuda."
            if device == "gpu":
                device = "cuda"
        else:
            device = "cuda" if torch.cuda.is_available() else "cpu"

        model = AutoModelForCausalLM.from_pretrained(model_id)
        model = model.to(device)

        tokenizer = AutoTokenizer.from_pretrained(model_id)

        # if batch_size > 1 (which generally leads to padding being required), and
        # if there is not an already assigned pad_token, assign an existing
        # special token to also be the padding token
        if tokenizer.pad_token is None and batch_size > 1:
            existing_special_tokens = list(tokenizer.special_tokens_map_extended.values())
            # check that the model already has at least one special token defined
            assert (
                len(existing_special_tokens) > 0
            ), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
            # assign one of the special tokens to also be the pad token
            tokenizer.add_special_tokens({"pad_token": existing_special_tokens[0]})

        if add_start_token:
            # leave room for <BOS> token to be added:
            assert (
                tokenizer.bos_token is not None
            ), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
            max_tokenized_len = model.config.max_length - 1
        else:
            max_tokenized_len = model.config.max_length

        encodings = tokenizer(
            input_texts,
            add_special_tokens=False,
            padding=True,
            truncation=True,
            max_length=max_tokenized_len,
            return_tensors="pt",
            return_attention_mask=True,
        ).to(device)

        encoded_texts = encodings["input_ids"]
        attn_masks = encodings["attention_mask"]

        # check that each input is long enough:
        if add_start_token:
            assert torch.all(torch.ge(attn_masks.sum(1), 1)), "Each input text must be at least one token long."
        else:
            assert torch.all(
                torch.ge(attn_masks.sum(1), 2)
            ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."

        ppls = []
        loss_fct = CrossEntropyLoss(reduction="none")

        for start_index in logging.tqdm(range(0, len(encoded_texts), batch_size)):
            end_index = min(start_index + batch_size, len(encoded_texts))
            encoded_batch = encoded_texts[start_index:end_index]
            attn_mask = attn_masks[start_index:end_index]

            if add_start_token:
                bos_tokens_tensor = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0)).to(device)
                encoded_batch = torch.cat([bos_tokens_tensor, encoded_batch], dim=1)
                attn_mask = torch.cat(
                    [torch.ones(bos_tokens_tensor.size(), dtype=torch.int64).to(device), attn_mask], dim=1
                )

            labels = encoded_batch

            with torch.no_grad():
                out_logits = model(encoded_batch, attention_mask=attn_mask).logits

            shift_logits = out_logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            shift_attention_mask_batch = attn_mask[..., 1:].contiguous()

            perplexity_batch = torch.exp(
                (loss_fct(shift_logits.transpose(1, 2), shift_labels) * shift_attention_mask_batch).sum(1)
                / shift_attention_mask_batch.sum(1)
            )

            ppls += perplexity_batch.tolist()

        return {"perplexities": ppls, "mean_perplexity": np.mean(ppls)}
| 33
| 0
|
'''simple docstring'''
import unittest
from transformers import BertGenerationConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import BertGenerationDecoder, BertGenerationEncoder
class BertGenerationEncoderTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=50,
        initializer_range=0.02,
        use_labels=True,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.use_labels = use_labels
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_labels = None
        if self.use_labels:
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.get_config()

        return config, input_ids, input_mask, token_labels

    def get_config(self):
        return BertGenerationConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            input_mask,
            token_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
    def create_and_check_model(self, config, input_ids, input_mask, token_labels, **kwargs):
        model = BertGenerationEncoder(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self, config, input_ids, input_mask, token_labels, encoder_hidden_states, encoder_attention_mask, **kwargs
    ):
        config.add_cross_attention = True
        model = BertGenerationEncoder(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
        )
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_decoder_model_past_large_inputs(
        self, config, input_ids, input_mask, token_labels, encoder_hidden_states, encoder_attention_mask, **kwargs
    ):
        config.is_decoder = True
        config.add_cross_attention = True
        model = BertGenerationDecoder(config=config).to(torch_device).eval()

        # first forward pass
        outputs = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            use_cache=True,
        )
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(
            next_input_ids,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_hidden_states=True,
        )["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def create_and_check_for_causal_lm(self, config, input_ids, input_mask, token_labels, *args):
        model = BertGenerationDecoder(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def prepare_config_and_inputs_for_common(self):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class BertGenerationEncoderTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else ()
    all_generative_model_classes = (BertGenerationDecoder,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": BertGenerationEncoder, "text-generation": BertGenerationDecoder}
        if is_torch_available()
        else {}
    )

    def setUp(self):
        self.model_tester = BertGenerationEncoderTester(self)
        self.config_tester = ConfigTester(self, config_class=BertGenerationConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_as_bert(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        config.model_type = "bert"
        self.model_tester.create_and_check_model(config, input_ids, input_mask, token_labels)

    def test_model_as_decoder(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)

    def test_decoder_model_past_with_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs)

    def test_model_as_decoder_with_default_input_mask(self):
        # This regression test was failing with PyTorch < 1.3
        (
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()

        input_mask = None

        self.model_tester.create_and_check_model_as_decoder(
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def test_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_for_causal_lm(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
        self.assertIsNotNone(model)
@require_torch
class BertGenerationEncoderIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
        input_ids = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size([1, 8, 1024])
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[0.1775, 0.0083, -0.0321], [1.6002, 0.1287, 0.3912], [2.1473, 0.5791, 0.6066]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))


@require_torch
class BertGenerationDecoderIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = BertGenerationDecoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
        input_ids = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size([1, 8, 50358])
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.5788, -2.5994, -3.7054], [0.0438, 4.7997, 1.8795], [1.5862, 6.6409, 4.4638]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
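# A minimal sketch of the KV-cache invariant verified by the decoder test above
# (hypothetical tensors; `model` is any causal decoder with `use_cache` support,
# not necessarily BertGenerationDecoder):
#
#     out_full = model(torch.cat([prefix, new], dim=-1)).logits[:, -new.shape[-1] :]
#     past = model(prefix, use_cache=True).past_key_values
#     out_cached = model(new, past_key_values=past).logits
#     assert torch.allclose(out_full, out_cached, atol=1e-3)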
| 672
|
'''simple docstring'''
import math
import random
from typing import Any
from .hill_climbing import SearchProblem
def simulated_annealing(
    search_prob,
    find_max: bool = True,
    max_x: float = math.inf,
    min_x: float = -math.inf,
    max_y: float = math.inf,
    min_y: float = -math.inf,
    visualization: bool = False,
    start_temperate: float = 100,
    rate_of_decrease: float = 0.01,
    threshold_temp: float = 1,
) -> Any:
    search_end = False
    current_state = search_prob
    current_temp = start_temperate
    scores = []
    iterations = 0
    best_state = None

    while not search_end:
        current_score = current_state.score()
        if best_state is None or current_score > best_state.score():
            best_state = current_state
        scores.append(current_score)
        iterations += 1
        next_state = None
        neighbors = current_state.get_neighbors()
        while (
            next_state is None and neighbors
        ):  # till we do not find a neighbor that we can move to
            index = random.randint(0, len(neighbors) - 1)  # picking a random neighbor
            picked_neighbor = neighbors.pop(index)
            change = picked_neighbor.score() - current_score

            if (
                picked_neighbor.x > max_x
                or picked_neighbor.x < min_x
                or picked_neighbor.y > max_y
                or picked_neighbor.y < min_y
            ):
                continue  # neighbor outside our bounds

            if not find_max:
                change = change * -1  # in case we are finding minimum

            if change > 0:  # improves the solution
                next_state = picked_neighbor
            else:
                probability = (math.e) ** (
                    change / current_temp
                )  # probability generation function
                if random.random() < probability:  # random number within probability
                    next_state = picked_neighbor
        current_temp = current_temp - (current_temp * rate_of_decrease)

        if current_temp < threshold_temp or next_state is None:
            # temperature below threshold, or could not find a suitable neighbor
            search_end = True
        else:
            current_state = next_state

    if visualization:
        from matplotlib import pyplot as plt

        plt.plot(range(iterations), scores)
        plt.xlabel("Iterations")
        plt.ylabel("Function values")
        plt.show()
    return best_state
if __name__ == "__main__":
def __lowerCamelCase ( _lowercase , _lowercase ) -> str:
return (x**2) + (y**2)
# starting the problem with initial coordinates (12, 47)
a : Dict = SearchProblem(x=1_2, y=4_7, step_size=1, function_to_optimize=test_fa)
a : Dict = simulated_annealing(
prob, find_max=False, max_x=1_0_0, min_x=5, max_y=5_0, min_y=-5, visualization=True
)
print(
"""The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 """
F'''and 50 > y > - 5 found via hill climbing: {local_min.score()}'''
)
# starting the problem with initial coordinates (12, 47)
a : List[str] = SearchProblem(x=1_2, y=4_7, step_size=1, function_to_optimize=test_fa)
a : Dict = simulated_annealing(
prob, find_max=True, max_x=1_0_0, min_x=5, max_y=5_0, min_y=-5, visualization=True
)
print(
"""The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 """
F'''and 50 > y > - 5 found via hill climbing: {local_min.score()}'''
)
def __lowerCamelCase ( _lowercase , _lowercase ) -> Optional[Any]:
return (3 * x**2) - (6 * y)
a : Tuple = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
a : Any = simulated_annealing(prob, find_max=False, visualization=True)
print(
"""The minimum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: """
F'''{local_min.score()}'''
)
a : List[Any] = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
a : Dict = simulated_annealing(prob, find_max=True, visualization=True)
print(
"""The maximum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: """
F'''{local_min.score()}'''
)
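if __name__ == "__main__":
    # A standalone sketch of the Metropolis acceptance rule used above
    # (hypothetical numbers): a worse move (change < 0) is accepted with
    # probability e ** (change / temperature), so exploration shrinks as T cools.
    change, temperature = -2.0, 10.0
    probability = math.e ** (change / temperature)  # ~0.819
    accepted = random.random() < probability
    print(f"accept probability at T={temperature}: {probability:.3f} -> {accepted}")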
| 672
| 1
|
from decimal import Decimal, getcontext
from math import ceil, factorial
def pi(precision: int) -> str:
    if not isinstance(precision, int):
        raise TypeError("Undefined for non-integers")
    elif precision < 1:
        raise ValueError("Undefined for non-natural numbers")

    # precision of Decimal operations; the Chudnovsky series adds
    # roughly 14 digits per iteration
    getcontext().prec = precision
    num_iterations = ceil(precision / 14)
    constant_term = 426_880 * Decimal(10_005).sqrt()
    exponential_term = 1
    linear_term = 13_591_409
    partial_sum = Decimal(linear_term)
    for k in range(1, num_iterations):
        multinomial_term = factorial(6 * k) // (factorial(3 * k) * factorial(k) ** 3)
        linear_term += 545_140_134
        exponential_term *= -262_537_412_640_768_000
        partial_sum += Decimal(multinomial_term * linear_term) / exponential_term
    return str(constant_term / partial_sum)[:-1]


if __name__ == "__main__":
    n = 50
    print(f"The first {n} digits of pi is: {pi(n)}")
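if __name__ == "__main__":
    # Sanity-check sketch (not part of the original file): the leading digits
    # must match the known expansion of pi.
    assert pi(10).startswith("3.1415")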
| 14
|
def harmonic_series(n_term: str) -> list:
    """Generate the harmonic series 1, 1/2, 1/3, ... up to the nth term."""
    if n_term == "":
        return []
    series: list = []
    for temp in range(int(n_term)):
        series.append(f"1/{temp + 1}" if series else "1")
    return series


if __name__ == "__main__":
    nth_term = input("Enter the last number (nth term) of the Harmonic Series")
    print("Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n")
    print(harmonic_series(nth_term))
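# Usage sketch:
#     >>> harmonic_series("5")
#     ['1', '1/2', '1/3', '1/4', '1/5']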
| 313
| 0
|
"""simple docstring"""
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class TvltFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["audio_values", "audio_mask"]

    def __init__(
        self,
        spectrogram_length=2048,
        num_channels=1,
        patch_size=[16, 16],
        feature_size=128,
        sampling_rate=44100,
        hop_length_to_sampling_rate=86,
        n_fft=2048,
        padding_value=0.0,
        **kwargs,
    ):
        super().__init__(
            feature_size=feature_size,
            sampling_rate=sampling_rate,
            padding_value=padding_value,
            **kwargs,
        )
        self.spectrogram_length = spectrogram_length
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.freq_len = feature_size // self.patch_size[1]
        self.n_fft = n_fft
        self.hop_length = sampling_rate // hop_length_to_sampling_rate
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2,
            num_mel_filters=feature_size,
            min_frequency=0.0,
            max_frequency=22050.0,
            sampling_rate=sampling_rate,
            norm="slaney",
            mel_scale="slaney",
        ).T
    def _np_extract_fbank_features(self, waveform):
        log_spec = spectrogram(
            waveform,
            window_function(self.n_fft, "hann"),
            frame_length=self.n_fft,
            hop_length=self.hop_length,
            power=2.0,
            mel_filters=self.mel_filters.T,
            log_mel="dB",
            db_range=80.0,
        )
        log_spec = log_spec[:, :-1]
        log_spec = log_spec - 20.0
        log_spec = np.clip(log_spec / 40.0, -2.0, 0.0) + 1.0
        return log_spec
    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = True,
        sampling_rate: Optional[int] = None,
        resample: bool = False,
        mask_audio: bool = False,
        **kwargs,
    ) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    "This feature extractor is set to support sampling rate"
                    f" of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled"
                    f" with {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            raw_speech = [np.asarray([speech], dtype=np.float32).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)
        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech]).T]

        # Convert audio signals to log mel spectrograms, truncate by time axis
        audio_features = [
            self._np_extract_fbank_features(waveform.squeeze()).T[: self.spectrogram_length] for waveform in raw_speech
        ]
        if isinstance(audio_features[0], list):
            audio_features = [np.asarray(feature, dtype=np.float32) for feature in audio_features]

        # Create audio attention mask
        max_patch_len = max(
            [ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len for feature in audio_features]
        )  # The maximum number of audio patches in a batch
        if return_attention_mask:
            audio_mask = [
                (ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [1]
                + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [0]
                for feature in audio_features
            ]
            audio_mask = np.array(audio_mask).astype(np.float32)

        # convert into correct format for padding
        max_time_len = max_patch_len // self.freq_len * self.patch_size[0]  # The maximum audio size in a batch
        padded_audio_features = np.ones([len(audio_features), 1, max_time_len, self.feature_size]).astype(np.float32)
        padded_audio_features = padded_audio_features * self.padding_value
        for i in range(len(audio_features)):
            feature = audio_features[i]
            padded_audio_features[i, :, : feature.shape[0], :] = feature

        # return as BatchFeature
        if return_attention_mask:
            data = {"audio_values": padded_audio_features, "audio_mask": audio_mask}
        else:
            data = {"audio_values": padded_audio_features}

        encoded_inputs = BatchFeature(data=data, tensor_type=return_tensors)
        return encoded_inputs
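# A minimal usage sketch for the feature extractor above (toy 1-second signal at
# the default 44.1 kHz sampling rate; output shapes depend on the padding logic):
#
#     extractor = TvltFeatureExtractor()
#     audio = np.random.randn(44100).astype(np.float32)
#     batch = extractor(audio, sampling_rate=44100, return_tensors="np")
#     print(batch["audio_values"].shape)  # (1, 1, time_frames, 128)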
| 719
|
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_layoutlmv2 import LayoutLMv2ImageProcessor


logger = logging.get_logger(__name__)


class LayoutLMv2FeatureExtractor(LayoutLMv2ImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use LayoutLMv2ImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 295
| 0
|
import logging
import math
from functools import partial
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union
import torch
from .tensor_utils import tensor_tree_map, tree_map
def _fetch_dims(tree: Union[dict, list, tuple, torch.Tensor]) -> List[Tuple[int, ...]]:
    shapes = []
    if isinstance(tree, dict):
        for v in tree.values():
            shapes.extend(_fetch_dims(v))
    elif isinstance(tree, (list, tuple)):
        for t in tree:
            shapes.extend(_fetch_dims(t))
    elif isinstance(tree, torch.Tensor):
        shapes.append(tree.shape)
    else:
        raise ValueError("Not supported")

    return shapes


@torch.jit.ignore
def _flat_idx_to_idx(flat_idx: int, dims: Tuple[int, ...]) -> Tuple[int, ...]:
    idx = []
    for d in reversed(dims):
        idx.append(flat_idx % d)
        flat_idx = flat_idx // d

    return tuple(reversed(idx))
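# Example of the mixed-radix conversion above (a sketch):
#     _flat_idx_to_idx(5, (2, 3)) == (1, 2)  # since 5 == 1 * 3 + 2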
@torch.jit.ignore
def _get_minimal_slice_set(
    start: Sequence[int],
    end: Sequence[int],
    dims: Sequence[int],
    start_edges: Optional[Sequence[bool]] = None,
    end_edges: Optional[Sequence[bool]] = None,
) -> List[Tuple[slice, ...]]:
# start_edges and end_edges both indicate whether, starting from any given
# dimension, the start/end index is at the top/bottom edge of the
# corresponding tensor, modeled as a tree
def reduce_edge_list(SCREAMING_SNAKE_CASE__ : List[bool] ) -> None:
UpperCamelCase :Optional[int] = True
for i in range(len(SCREAMING_SNAKE_CASE__ ) ):
UpperCamelCase :List[str] = -1 * (i + 1)
l[reversed_idx] &= tally
UpperCamelCase :Dict = l[reversed_idx]
if start_edges is None:
UpperCamelCase :int = [s == 0 for s in start]
reduce_edge_list(SCREAMING_SNAKE_CASE__ )
if end_edges is None:
UpperCamelCase :Dict = [e == (d - 1) for e, d in zip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )]
reduce_edge_list(SCREAMING_SNAKE_CASE__ )
# Base cases. Either start/end are empty and we're done, or the final,
# one-dimensional tensor can be simply sliced
if len(SCREAMING_SNAKE_CASE__ ) == 0:
return [()]
elif len(SCREAMING_SNAKE_CASE__ ) == 1:
return [(slice(start[0] , end[0] + 1 ),)]
UpperCamelCase :List[Tuple[slice, ...]] = []
UpperCamelCase :List[slice] = []
# Dimensions common to start and end can be selected directly
for s, e in zip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
if s == e:
path_list.append(slice(SCREAMING_SNAKE_CASE__ , s + 1 ) )
else:
break
UpperCamelCase :Tuple[slice, ...] = tuple(SCREAMING_SNAKE_CASE__ )
UpperCamelCase :str = len(SCREAMING_SNAKE_CASE__ )
# start == end, and we're done
if divergence_idx == len(SCREAMING_SNAKE_CASE__ ):
return [path]
def upper() -> Tuple[Tuple[slice, ...], ...]:
assert start_edges is not None
assert end_edges is not None
UpperCamelCase :List[str] = start[divergence_idx]
return tuple(
path + (slice(SCREAMING_SNAKE_CASE__ , sdi + 1 ),) + s
for s in _get_minimal_slice_set(
start[divergence_idx + 1 :] , [d - 1 for d in dims[divergence_idx + 1 :]] , dims[divergence_idx + 1 :] , start_edges=start_edges[divergence_idx + 1 :] , end_edges=[True for _ in end_edges[divergence_idx + 1 :]] , ) )
def lower() -> Tuple[Tuple[slice, ...], ...]:
assert start_edges is not None
assert end_edges is not None
UpperCamelCase :List[Any] = end[divergence_idx]
return tuple(
path + (slice(SCREAMING_SNAKE_CASE__ , edi + 1 ),) + s
for s in _get_minimal_slice_set(
[0 for _ in start[divergence_idx + 1 :]] , end[divergence_idx + 1 :] , dims[divergence_idx + 1 :] , start_edges=[True for _ in start_edges[divergence_idx + 1 :]] , end_edges=end_edges[divergence_idx + 1 :] , ) )
# If both start and end are at the edges of the subtree rooted at
# divergence_idx, we can just select the whole subtree at once
if start_edges[divergence_idx] and end_edges[divergence_idx]:
slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] + 1 ),) )
# If just start is at the edge, we can grab almost all of the subtree,
# treating only the ragged bottom edge as an edge case
elif start_edges[divergence_idx]:
slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] ),) )
slices.extend(lower() )
# Analogous to the previous case, but the top is ragged this time
elif end_edges[divergence_idx]:
slices.extend(upper() )
slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] + 1 ),) )
# If both sides of the range are ragged, we need to handle both sides
# separately. If there's contiguous meat in between them, we can index it
# in one big chunk
else:
slices.extend(upper() )
UpperCamelCase :Optional[Any] = end[divergence_idx] - start[divergence_idx]
if middle_ground > 1:
slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] ),) )
slices.extend(lower() )
return slices
@torch.jit.ignore
def _chunk_slice(t: torch.Tensor, flat_start: int, flat_end: int, no_batch_dims: int) -> torch.Tensor:
    batch_dims = t.shape[:no_batch_dims]
    start_idx = list(_flat_idx_to_idx(flat_start, batch_dims))
    # _get_minimal_slice_set is inclusive
    end_idx = list(_flat_idx_to_idx(flat_end - 1, batch_dims))

    # Get an ordered list of slices to perform
    slices = _get_minimal_slice_set(
        start_idx,
        end_idx,
        batch_dims,
    )

    sliced_tensors = [t[s] for s in slices]

    return torch.cat([s.view((-1,) + t.shape[no_batch_dims:]) for s in sliced_tensors])
def chunk_layer(
    layer: Callable,
    inputs: Dict[str, Any],
    chunk_size: int,
    no_batch_dims: int,
    low_mem: bool = False,
    _out: Any = None,
    _add_into_out: bool = False,
) -> Any:
if not (len(SCREAMING_SNAKE_CASE__ ) > 0):
raise ValueError('''Must provide at least one input''' )
UpperCamelCase :List[Any] = [shape[:no_batch_dims] for shape in _fetch_dims(SCREAMING_SNAKE_CASE__ )]
UpperCamelCase :Optional[int] = tuple([max(SCREAMING_SNAKE_CASE__ ) for s in zip(*SCREAMING_SNAKE_CASE__ )] )
def _prep_inputs(SCREAMING_SNAKE_CASE__ : torch.Tensor ) -> torch.Tensor:
if not low_mem:
if not sum(t.shape[:no_batch_dims] ) == no_batch_dims:
UpperCamelCase :str = t.expand(orig_batch_dims + t.shape[no_batch_dims:] )
UpperCamelCase :Optional[int] = t.reshape(-1 , *t.shape[no_batch_dims:] )
else:
UpperCamelCase :List[Any] = t.expand(orig_batch_dims + t.shape[no_batch_dims:] )
return t
UpperCamelCase :Dict[str, Any] = tensor_tree_map(_prep_inputs , SCREAMING_SNAKE_CASE__ )
UpperCamelCase :Dict = None
if _out is not None:
UpperCamelCase :Union[str, Any] = tensor_tree_map(lambda SCREAMING_SNAKE_CASE__ : t.view([-1] + list(t.shape[no_batch_dims:] ) ) , _out )
UpperCamelCase :Optional[int] = 1
for d in orig_batch_dims:
flat_batch_dim *= d
UpperCamelCase :Tuple = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0)
def _select_chunk(SCREAMING_SNAKE_CASE__ : torch.Tensor ) -> torch.Tensor:
return t[i : i + chunk_size] if t.shape[0] != 1 else t
UpperCamelCase :Dict = 0
UpperCamelCase :Optional[Any] = prepped_outputs
for _ in range(SCREAMING_SNAKE_CASE__ ):
# Chunk the input
if not low_mem:
UpperCamelCase :List[Any] = _select_chunk
else:
UpperCamelCase :Union[str, Any] = partial(
_chunk_slice , flat_start=SCREAMING_SNAKE_CASE__ , flat_end=min(SCREAMING_SNAKE_CASE__ , i + chunk_size ) , no_batch_dims=len(SCREAMING_SNAKE_CASE__ ) , )
UpperCamelCase :Dict[str, Any] = tensor_tree_map(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# Run the layer on the chunk
UpperCamelCase :str = layer(**SCREAMING_SNAKE_CASE__ )
# Allocate space for the output
if out is None:
UpperCamelCase :Optional[int] = tensor_tree_map(lambda SCREAMING_SNAKE_CASE__ : t.new_zeros((flat_batch_dim,) + t.shape[1:] ) , SCREAMING_SNAKE_CASE__ )
# Put the chunk in its pre-allocated space
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
def assign(SCREAMING_SNAKE_CASE__ : dict , SCREAMING_SNAKE_CASE__ : dict ) -> None:
for k, v in da.items():
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
assign(SCREAMING_SNAKE_CASE__ , da[k] )
else:
if _add_into_out:
v[i : i + chunk_size] += da[k]
else:
UpperCamelCase :Optional[int] = da[k]
assign(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
elif isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
for xa, xa in zip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
if _add_into_out:
xa[i : i + chunk_size] += xa
else:
UpperCamelCase :Optional[int] = xa
elif isinstance(SCREAMING_SNAKE_CASE__ , torch.Tensor ):
if _add_into_out:
out[i : i + chunk_size] += output_chunk
else:
UpperCamelCase :Tuple = output_chunk
else:
raise ValueError('''Not supported''' )
i += chunk_size
UpperCamelCase :List[Any] = tensor_tree_map(lambda SCREAMING_SNAKE_CASE__ : t.view(orig_batch_dims + t.shape[1:] ) , SCREAMING_SNAKE_CASE__ )
return out
class ChunkSizeTuner:
def __init__( self , SCREAMING_SNAKE_CASE_ = 512 , ) -> Optional[Any]:
UpperCamelCase :Optional[Any] = max_chunk_size
UpperCamelCase :Optional[int] = None
UpperCamelCase :Optional[tuple] = None
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> int:
logging.info('''Tuning chunk size...''' )
if min_chunk_size >= self.max_chunk_size:
return min_chunk_size
UpperCamelCase :List[int] = [2**l for l in range(int(math.log(self.max_chunk_size , 2 ) ) + 1 )]
UpperCamelCase :Union[str, Any] = [c for c in candidates if c > min_chunk_size]
UpperCamelCase :List[str] = [min_chunk_size] + candidates
candidates[-1] += 4
def test_chunk_size(SCREAMING_SNAKE_CASE_ ) -> bool:
try:
with torch.no_grad():
fn(*SCREAMING_SNAKE_CASE_ , chunk_size=SCREAMING_SNAKE_CASE_ )
return True
except RuntimeError:
return False
UpperCamelCase :Optional[Any] = 0
UpperCamelCase :List[str] = len(SCREAMING_SNAKE_CASE_ ) - 1
while i > min_viable_chunk_size_index:
UpperCamelCase :int = test_chunk_size(candidates[i] )
if not viable:
UpperCamelCase :Optional[int] = (min_viable_chunk_size_index + i) // 2
else:
UpperCamelCase :str = i
UpperCamelCase :Optional[int] = (i + len(SCREAMING_SNAKE_CASE_ ) - 1) // 2
return candidates[min_viable_chunk_size_index]
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> bool:
UpperCamelCase :Union[str, Any] = True
for aa, aa in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
assert type(SCREAMING_SNAKE_CASE_ ) == type(SCREAMING_SNAKE_CASE_ )
if isinstance(SCREAMING_SNAKE_CASE_ , (list, tuple) ):
consistent &= self._compare_arg_caches(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
elif isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
UpperCamelCase :Dict = [v for _, v in sorted(aa.items() , key=lambda SCREAMING_SNAKE_CASE_ : x[0] )]
UpperCamelCase :Any = [v for _, v in sorted(aa.items() , key=lambda SCREAMING_SNAKE_CASE_ : x[0] )]
consistent &= self._compare_arg_caches(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
else:
consistent &= aa == aa
return consistent
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , ) -> int:
UpperCamelCase :Optional[Any] = True
UpperCamelCase :tuple = tree_map(lambda SCREAMING_SNAKE_CASE_ : a.shape if isinstance(SCREAMING_SNAKE_CASE_ , torch.Tensor ) else a , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
if self.cached_arg_data is not None:
# If args have changed shape/value, we need to re-tune
assert len(self.cached_arg_data ) == len(SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Union[str, Any] = self._compare_arg_caches(self.cached_arg_data , SCREAMING_SNAKE_CASE_ )
else:
# Otherwise, we can reuse the precomputed value
UpperCamelCase :str = False
if not consistent:
UpperCamelCase :int = self._determine_favorable_chunk_size(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , )
UpperCamelCase :Optional[Any] = arg_data
assert self.cached_chunk_size is not None
return self.cached_chunk_size
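# Sketch of the tuning strategy implemented by the class above: candidate chunk
# sizes are powers of two up to max_chunk_size, and a binary search keeps the
# largest candidate whose trial call does not raise a RuntimeError (typically a
# GPU out-of-memory error):
#
#     candidates = [2**l for l in range(int(math.log(512, 2)) + 1)]  # [1, 2, ..., 512]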
| 658
|
def naive_cut_rod_recursive(n: int, prices: list):
    _enforce_args(n, prices)
    if n == 0:
        return 0
    max_revenue = float("-inf")
    for i in range(1, n + 1):
        max_revenue = max(
            max_revenue, prices[i - 1] + naive_cut_rod_recursive(n - i, prices)
        )

    return max_revenue


def top_down_cut_rod(n: int, prices: list):
    _enforce_args(n, prices)
    max_rev = [float("-inf") for _ in range(n + 1)]
    return _top_down_cut_rod_recursive(n, prices, max_rev)


def _top_down_cut_rod_recursive(n: int, prices: list, max_rev: list):
    if max_rev[n] >= 0:
        return max_rev[n]
    elif n == 0:
        return 0
    else:
        max_revenue = float("-inf")
        for i in range(1, n + 1):
            max_revenue = max(
                max_revenue,
                prices[i - 1] + _top_down_cut_rod_recursive(n - i, prices, max_rev),
            )

        max_rev[n] = max_revenue

    return max_rev[n]


def bottom_up_cut_rod(n: int, prices: list):
    _enforce_args(n, prices)
    # length(max_rev) = n + 1, to accommodate for the revenue obtainable from a rod of
    # length 0.
    max_rev = [float("-inf") for _ in range(n + 1)]
    max_rev[0] = 0

    for i in range(1, n + 1):
        max_revenue_i = max_rev[i]
        for j in range(1, i + 1):
            max_revenue_i = max(max_revenue_i, prices[j - 1] + max_rev[i - j])

        max_rev[i] = max_revenue_i

    return max_rev[n]


def _enforce_args(n: int, prices: list):
    if n < 0:
        msg = f"n must be greater than or equal to 0. Got n = {n}"
        raise ValueError(msg)

    if n > len(prices):
        msg = (
            "Each integral piece of rod must have a corresponding price. "
            f"Got n = {n} but length of prices = {len(prices)}"
        )
        raise ValueError(msg)


def main():
    prices = [6, 10, 12, 15, 20, 23]
    n = len(prices)

    # the best revenue comes from cutting the rod into 6 pieces, each
    # of length 1 resulting in a revenue of 6 * 6 = 36.
    expected_max_revenue = 36

    max_rev_top_down = top_down_cut_rod(n, prices)
    max_rev_bottom_up = bottom_up_cut_rod(n, prices)
    max_rev_naive = naive_cut_rod_recursive(n, prices)

    assert expected_max_revenue == max_rev_top_down
    assert max_rev_top_down == max_rev_bottom_up
    assert max_rev_bottom_up == max_rev_naive
if __name__ == "__main__":
main()
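if __name__ == "__main__":
    # Extra usage sketch: with prices[i - 1] the price of a piece of length i,
    # the classic CLRS instance n = 4, prices = [1, 5, 8, 9] is best cut as 2 + 2.
    assert bottom_up_cut_rod(4, [1, 5, 8, 9]) == 10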
| 658
| 1
|
'''simple docstring'''
import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
snake_case_ : Optional[int] = """%20""".join(argv[1:]) if len(argv) > 1 else quote(str(input('Search: ')))
print('Googling.....')
snake_case_ : str = F'''https://www.google.com/search?q={query}&num=100'''
snake_case_ : Optional[Any] = requests.get(
url,
headers={'User-Agent': str(UserAgent().random)},
)
try:
snake_case_ : Union[str, Any] = (
BeautifulSoup(res.text, 'html.parser')
.find('div', attrs={'class': 'yuRUbf'})
.find('a')
.get('href')
)
except AttributeError:
snake_case_ : Optional[int] = parse_qs(
BeautifulSoup(res.text, 'html.parser')
.find('div', attrs={'class': 'kCrYT'})
.find('a')
.get('href')
)["""url"""][0]
webbrowser.open(link)
| 700
|
'''simple docstring'''
import torch
from torch import nn
class ProjectedAdaptiveLogSoftmax(nn.Module):
    def __init__(self, n_token, d_embed, d_proj, cutoffs, div_val=1, keep_order=False):
        super().__init__()
        self.n_token = n_token
        self.d_embed = d_embed
        self.d_proj = d_proj

        self.cutoffs = cutoffs + [n_token]
        self.cutoff_ends = [0] + self.cutoffs
        self.div_val = div_val

        self.shortlist_size = self.cutoffs[0]
        self.n_clusters = len(self.cutoffs) - 1
        self.head_size = self.shortlist_size + self.n_clusters

        if self.n_clusters > 0:
            self.cluster_weight = nn.Parameter(torch.zeros(self.n_clusters, self.d_embed))
            self.cluster_bias = nn.Parameter(torch.zeros(self.n_clusters))

        self.out_layers = nn.ModuleList()
        self.out_projs = nn.ParameterList()

        if div_val == 1:
            for i in range(len(self.cutoffs)):
                if d_proj != d_embed:
                    self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_embed)))
                else:
                    self.out_projs.append(None)
                self.out_layers.append(nn.Linear(d_embed, n_token))
        else:
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                d_emb_i = d_embed // (div_val**i)
                self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_emb_i)))
                self.out_layers.append(nn.Linear(d_emb_i, r_idx - l_idx))

        self.keep_order = keep_order

    def _compute_logit(self, hidden, weight, bias, proj):
        if proj is None:
            logit = nn.functional.linear(hidden, weight, bias=bias)
        else:
            # if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
            proj_hid = nn.functional.linear(hidden, proj.t().contiguous())
            logit = nn.functional.linear(proj_hid, weight, bias=bias)
            # else:
            #     logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
            #     if bias is not None:
            #         logit = logit + bias

        return logit
def UpperCAmelCase ( self , lowerCamelCase__ , lowerCamelCase__=None , lowerCamelCase__=False ):
'''simple docstring'''
if labels is not None:
# Shift so that tokens < n predict n
UpperCamelCase = hidden[..., :-1, :].contiguous()
UpperCamelCase = labels[..., 1:].contiguous()
UpperCamelCase = hidden.view(-1 , hidden.size(-1 ) )
UpperCamelCase = labels.view(-1 )
if hidden.size(0 ) != labels.size(0 ):
raise RuntimeError('''Input and labels should have the same size in the batch dimension.''' )
else:
UpperCamelCase = hidden.view(-1 , hidden.size(-1 ) )
if self.n_clusters == 0:
UpperCamelCase = self._compute_logit(lowerCamelCase__ , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] )
if labels is not None:
UpperCamelCase = labels != -1_0_0
UpperCamelCase = torch.zeros_like(lowerCamelCase__ , dtype=hidden.dtype , device=hidden.device )
UpperCamelCase = (
-nn.functional.log_softmax(lowerCamelCase__ , dim=-1 )[mask].gather(1 , labels[mask].unsqueeze(1 ) ).squeeze(1 )
)
else:
UpperCamelCase = nn.functional.log_softmax(lowerCamelCase__ , dim=-1 )
else:
# construct weights and biases
UpperCamelCase , UpperCamelCase = [], []
for i in range(len(self.cutoffs ) ):
if self.div_val == 1:
UpperCamelCase , UpperCamelCase = self.cutoff_ends[i], self.cutoff_ends[i + 1]
UpperCamelCase = self.out_layers[0].weight[l_idx:r_idx]
UpperCamelCase = self.out_layers[0].bias[l_idx:r_idx]
else:
UpperCamelCase = self.out_layers[i].weight
UpperCamelCase = self.out_layers[i].bias
if i == 0:
UpperCamelCase = torch.cat([weight_i, self.cluster_weight] , dim=0 )
UpperCamelCase = torch.cat([bias_i, self.cluster_bias] , dim=0 )
weights.append(lowerCamelCase__ )
biases.append(lowerCamelCase__ )
UpperCamelCase , UpperCamelCase , UpperCamelCase = weights[0], biases[0], self.out_projs[0]
UpperCamelCase = self._compute_logit(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
UpperCamelCase = nn.functional.log_softmax(lowerCamelCase__ , dim=1 )
if labels is None:
UpperCamelCase = hidden.new_empty((head_logit.size(0 ), self.n_token) )
else:
UpperCamelCase = torch.zeros_like(lowerCamelCase__ , dtype=hidden.dtype , device=hidden.device )
UpperCamelCase = 0
UpperCamelCase = [0] + self.cutoffs
for i in range(len(lowerCamelCase__ ) - 1 ):
UpperCamelCase , UpperCamelCase = cutoff_values[i], cutoff_values[i + 1]
if labels is not None:
UpperCamelCase = (labels >= l_idx) & (labels < r_idx)
UpperCamelCase = mask_i.nonzero().squeeze()
if indices_i.numel() == 0:
continue
UpperCamelCase = labels.index_select(0 , lowerCamelCase__ ) - l_idx
UpperCamelCase = head_logprob.index_select(0 , lowerCamelCase__ )
UpperCamelCase = hidden.index_select(0 , lowerCamelCase__ )
else:
UpperCamelCase = hidden
if i == 0:
if labels is not None:
UpperCamelCase = head_logprob_i.gather(1 , target_i[:, None] ).squeeze(1 )
else:
UpperCamelCase = head_logprob[:, : self.cutoffs[0]]
else:
UpperCamelCase , UpperCamelCase , UpperCamelCase = weights[i], biases[i], self.out_projs[i]
UpperCamelCase = self._compute_logit(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
UpperCamelCase = nn.functional.log_softmax(lowerCamelCase__ , dim=1 )
UpperCamelCase = self.cutoffs[0] + i - 1 # No probability for the head cluster
if labels is not None:
UpperCamelCase = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
1 , target_i[:, None] ).squeeze(1 )
else:
UpperCamelCase = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
UpperCamelCase = logprob_i
if labels is not None:
if (hasattr(self , '''keep_order''' ) and self.keep_order) or keep_order:
out.index_copy_(0 , lowerCamelCase__ , -logprob_i )
else:
out[offset : offset + logprob_i.size(0 )].copy_(-logprob_i )
offset += logprob_i.size(0 )
return out
def UpperCAmelCase ( self , lowerCamelCase__ ):
'''simple docstring'''
if self.n_clusters == 0:
UpperCamelCase = self._compute_logit(lowerCamelCase__ , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] )
return nn.functional.log_softmax(lowerCamelCase__ , dim=-1 )
else:
# construct weights and biases
UpperCamelCase , UpperCamelCase = [], []
for i in range(len(self.cutoffs ) ):
if self.div_val == 1:
UpperCamelCase , UpperCamelCase = self.cutoff_ends[i], self.cutoff_ends[i + 1]
UpperCamelCase = self.out_layers[0].weight[l_idx:r_idx]
UpperCamelCase = self.out_layers[0].bias[l_idx:r_idx]
else:
UpperCamelCase = self.out_layers[i].weight
UpperCamelCase = self.out_layers[i].bias
if i == 0:
UpperCamelCase = torch.cat([weight_i, self.cluster_weight] , dim=0 )
UpperCamelCase = torch.cat([bias_i, self.cluster_bias] , dim=0 )
weights.append(lowerCamelCase__ )
biases.append(lowerCamelCase__ )
UpperCamelCase , UpperCamelCase , UpperCamelCase = weights[0], biases[0], self.out_projs[0]
UpperCamelCase = self._compute_logit(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
UpperCamelCase = hidden.new_empty((head_logit.size(0 ), self.n_token) )
UpperCamelCase = nn.functional.log_softmax(lowerCamelCase__ , dim=1 )
UpperCamelCase = [0] + self.cutoffs
for i in range(len(lowerCamelCase__ ) - 1 ):
UpperCamelCase , UpperCamelCase = cutoff_values[i], cutoff_values[i + 1]
if i == 0:
UpperCamelCase = head_logprob[:, : self.cutoffs[0]]
else:
UpperCamelCase , UpperCamelCase , UpperCamelCase = weights[i], biases[i], self.out_projs[i]
UpperCamelCase = self._compute_logit(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
UpperCamelCase = nn.functional.log_softmax(lowerCamelCase__ , dim=1 )
UpperCamelCase = head_logprob[:, -i] + tail_logprob_i
UpperCamelCase = logprob_i
return out
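# A minimal usage sketch for the adaptive softmax above (hypothetical sizes; the
# module factorizes log p(w) = log p(cluster | h) + log p(w | cluster, h) for tail
# words, so frequent head words get a full softmax while rare words share smaller,
# projected ones):
#
#     softmax = ProjectedAdaptiveLogSoftmax(n_token=1000, d_embed=32, d_proj=32, cutoffs=[100, 500])
#     hidden = torch.randn(4, 8, 32)
#     labels = torch.randint(0, 1000, (4, 8))
#     neg_logprobs = softmax(hidden, labels)  # per-token NLL over the shifted sequence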
| 350
| 0
|
def palindromic_string(input_string: str) -> str:
    """Manacher's algorithm: find the longest palindromic substring in O(n)."""
    max_length = 0

    # if input_string is "aba" then new_input_string becomes "a|b|a"
    new_input_string = ""
    output_string = ""

    # append each character + "|" in new_string for range(0, length-1)
    for i in input_string[: len(input_string) - 1]:
        new_input_string += i + "|"

    # append last character
    new_input_string += input_string[-1]

    # we will store the starting and ending of previous furthest ending palindromic
    # substring
    l, r = 0, 0  # noqa: E741

    # length[i] shows the length of palindromic substring with center i
    length = [1 for i in range(len(new_input_string))]

    # for each character in new_string find corresponding palindromic string
    start = 0
    for j in range(len(new_input_string)):
        k = 1 if j > r else min(length[l + r - j] // 2, r - j + 1)
        while (
            j - k >= 0
            and j + k < len(new_input_string)
            and new_input_string[k + j] == new_input_string[j - k]
        ):
            k += 1

        length[j] = 2 * k - 1

        # if this palindrome ends after the previously explored end (that is r),
        # update r to the last index of this palindrome
        if j + k - 1 > r:
            l = j - k + 1  # noqa: E741
            r = j + k - 1

        # update max_length and start position
        if max_length < length[j]:
            max_length = length[j]
            start = j

    # create that string
    s = new_input_string[start - max_length // 2 : start + max_length // 2 + 1]
    for i in s:
        if i != "|":
            output_string += i

    return output_string
if __name__ == "__main__":
import doctest
doctest.testmod()
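# Usage sketch:
#     >>> palindromic_string("abbbaba")
#     'abbba'
#     >>> palindromic_string("ababa")
#     'ababa'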
| 36
|
import html
from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from ...utils import is_bs4_available, logging, requires_backends


if is_bs4_available():
    import bs4
    from bs4 import BeautifulSoup


logger = logging.get_logger(__name__)
class MarkupLMFeatureExtractor(FeatureExtractionMixin):
    def __init__(self, **kwargs):
        requires_backends(self, ["bs4"])
        super().__init__(**kwargs)

    def xpath_soup(self, element):
        xpath_tags = []
        xpath_subscripts = []
        child = element if element.name else element.parent
        for parent in child.parents:  # type: bs4.element.Tag
            siblings = parent.find_all(child.name, recursive=False)
            xpath_tags.append(child.name)
            xpath_subscripts.append(
                0 if 1 == len(siblings) else next(i for i, s in enumerate(siblings, 1) if s is child)
            )
            child = parent
        xpath_tags.reverse()
        xpath_subscripts.reverse()
        return xpath_tags, xpath_subscripts

    def get_three_from_single(self, html_string):
        html_code = BeautifulSoup(html_string, "html.parser")

        all_doc_strings = []
        string2xtag_seq = []
        string2xsubs_seq = []

        for element in html_code.descendants:
            if type(element) == bs4.element.NavigableString:
                if type(element.parent) != bs4.element.Tag:
                    continue

                text_in_this_tag = html.unescape(element).strip()
                if not text_in_this_tag:
                    continue

                all_doc_strings.append(text_in_this_tag)

                xpath_tags, xpath_subscripts = self.xpath_soup(element)
                string2xtag_seq.append(xpath_tags)
                string2xsubs_seq.append(xpath_subscripts)

        if len(all_doc_strings) != len(string2xtag_seq):
            raise ValueError("Number of doc strings and xtags does not correspond")
        if len(all_doc_strings) != len(string2xsubs_seq):
            raise ValueError("Number of doc strings and xsubs does not correspond")

        return all_doc_strings, string2xtag_seq, string2xsubs_seq

    def construct_xpath(self, xpath_tags, xpath_subscripts):
        xpath = ""
        for tagname, subs in zip(xpath_tags, xpath_subscripts):
            xpath += f"/{tagname}"
            if subs != 0:
                xpath += f"[{subs}]"
        return xpath

    def __call__(self, html_strings) -> BatchFeature:
        valid_strings = False

        # Check that strings has a valid type
        if isinstance(html_strings, str):
            valid_strings = True
        elif isinstance(html_strings, (list, tuple)):
            if len(html_strings) == 0 or isinstance(html_strings[0], str):
                valid_strings = True

        if not valid_strings:
            raise ValueError(
                "HTML strings must of type `str`, `List[str]` (batch of examples), "
                f"but is of type {type(html_strings)}."
            )

        is_batched = bool(isinstance(html_strings, (list, tuple)) and (isinstance(html_strings[0], str)))

        if not is_batched:
            html_strings = [html_strings]

        # Get nodes + xpaths
        nodes = []
        xpaths = []
        for html_string in html_strings:
            all_doc_strings, string2xtag_seq, string2xsubs_seq = self.get_three_from_single(html_string)
            nodes.append(all_doc_strings)
            xpath_strings = []
            for node, tag_list, sub_list in zip(all_doc_strings, string2xtag_seq, string2xsubs_seq):
                xpath_string = self.construct_xpath(tag_list, sub_list)
                xpath_strings.append(xpath_string)
            xpaths.append(xpath_strings)

        # return as Dict
        data = {"nodes": nodes, "xpaths": xpaths}
        encoded_inputs = BatchFeature(data=data, tensor_type=None)

        return encoded_inputs
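# A minimal usage sketch for the feature extractor above (assumes bs4 is installed):
#
#     extractor = MarkupLMFeatureExtractor()
#     html_str = "<html><body><h1>Title</h1><p>Hello world</p></body></html>"
#     out = extractor(html_str)
#     print(out["nodes"])   # [['Title', 'Hello world']]
#     print(out["xpaths"])  # [['/html/body/h1', '/html/body/p']]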
| 74
| 0
|
'''simple docstring'''
from typing import Dict, Optional
import numpy as np
import datasets
A = '\nIoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union\nbetween the predicted segmentation and the ground truth. For binary (two classes) or multi-class segmentation,\nthe mean IoU of the image is calculated by taking the IoU of each class and averaging them.\n'
A = '\nArgs:\n predictions (`List[ndarray]`):\n List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n references (`List[ndarray]`):\n List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n num_labels (`int`):\n Number of classes (categories).\n ignore_index (`int`):\n Index that will be ignored during evaluation.\n nan_to_num (`int`, *optional*):\n If specified, NaN values will be replaced by the number defined by the user.\n label_map (`dict`, *optional*):\n If specified, dictionary mapping old label indices to new label indices.\n reduce_labels (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background,\n and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255.\n\nReturns:\n `Dict[str, float | ndarray]` comprising various elements:\n - *mean_iou* (`float`):\n Mean Intersection-over-Union (IoU averaged over all categories).\n - *mean_accuracy* (`float`):\n Mean accuracy (averaged over all categories).\n - *overall_accuracy* (`float`):\n Overall accuracy on all images.\n - *per_category_accuracy* (`ndarray` of shape `(num_labels,)`):\n Per category accuracy.\n - *per_category_iou* (`ndarray` of shape `(num_labels,)`):\n Per category IoU.\n\nExamples:\n\n >>> import numpy as np\n\n >>> mean_iou = datasets.load_metric("mean_iou")\n\n >>> # suppose one has 3 different segmentation maps predicted\n >>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]])\n >>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]])\n\n >>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]])\n >>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]])\n\n >>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]])\n >>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]])\n\n >>> predicted = [predicted_1, predicted_2, predicted_3]\n >>> ground_truth = [actual_1, actual_2, actual_3]\n\n >>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False)\n >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n {\'mean_iou\': 0.47750000000000004, \'mean_accuracy\': 0.5916666666666666, \'overall_accuracy\': 0.5263157894736842, \'per_category_iou\': array([0. , 0. , 0.375, 0.4 , 0.5 , 0. , 0.5 , 1. , 1. , 1. ]), \'per_category_accuracy\': array([0. , 0. , 0.75 , 0.66666667, 1. , 0. , 0.5 , 1. , 1. , 1. ])}\n'
A = '\\n@software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020,\nauthor = {{MMSegmentation Contributors}},\nlicense = {Apache-2.0},\nmonth = {7},\ntitle = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}},\nurl = {https://github.com/open-mmlab/mmsegmentation},\nyear = {2020}\n}'
def intersect_and_union(
    pred_label,
    label,
    num_labels,
    ignore_index: int,
    label_map: Optional[Dict[int, int]] = None,
    reduce_labels: bool = False,
):
    """Calculate the intersection and union areas between a predicted and a ground-truth segmentation map."""
    if label_map is not None:
        for old_id, new_id in label_map.items():
            label[label == old_id] = new_id

    # turn into Numpy arrays
    pred_label = np.array(pred_label)
    label = np.array(label)

    if reduce_labels:
        label[label == 0] = 255
        label = label - 1
        label[label == 254] = 255

    mask = np.not_equal(label, ignore_index)
    pred_label = pred_label[mask]
    label = np.array(label)[mask]

    intersect = pred_label[pred_label == label]

    area_intersect = np.histogram(intersect, bins=num_labels, range=(0, num_labels - 1))[0]
    area_pred_label = np.histogram(pred_label, bins=num_labels, range=(0, num_labels - 1))[0]
    area_label = np.histogram(label, bins=num_labels, range=(0, num_labels - 1))[0]

    area_union = area_pred_label + area_label - area_intersect

    return area_intersect, area_union, area_pred_label, area_label
def total_intersect_and_union(
    results,
    gt_seg_maps,
    num_labels,
    ignore_index: int,
    label_map: Optional[Dict[int, int]] = None,
    reduce_labels: bool = False,
):
    """Accumulate intersection and union areas over a list of (prediction, ground truth) pairs."""
    total_area_intersect = np.zeros((num_labels,), dtype=np.float64)
    total_area_union = np.zeros((num_labels,), dtype=np.float64)
    total_area_pred_label = np.zeros((num_labels,), dtype=np.float64)
    total_area_label = np.zeros((num_labels,), dtype=np.float64)
    for result, gt_seg_map in zip(results, gt_seg_maps):
        area_intersect, area_union, area_pred_label, area_label = intersect_and_union(
            result, gt_seg_map, num_labels, ignore_index, label_map, reduce_labels
        )
        total_area_intersect += area_intersect
        total_area_union += area_union
        total_area_pred_label += area_pred_label
        total_area_label += area_label
    return total_area_intersect, total_area_union, total_area_pred_label, total_area_label
def mean_iou(
    results,
    gt_seg_maps,
    num_labels,
    ignore_index: int,
    nan_to_num: Optional[int] = None,
    label_map: Optional[Dict[int, int]] = None,
    reduce_labels: bool = False,
):
    """Compute mean IoU, mean accuracy and overall accuracy from the accumulated areas."""
    total_area_intersect, total_area_union, total_area_pred_label, total_area_label = total_intersect_and_union(
        results, gt_seg_maps, num_labels, ignore_index, label_map, reduce_labels
    )

    # compute metrics
    metrics = {}

    all_acc = total_area_intersect.sum() / total_area_label.sum()
    iou = total_area_intersect / total_area_union
    acc = total_area_intersect / total_area_label

    metrics["mean_iou"] = np.nanmean(iou)
    metrics["mean_accuracy"] = np.nanmean(acc)
    metrics["overall_accuracy"] = all_acc
    metrics["per_category_iou"] = iou
    metrics["per_category_accuracy"] = acc

    if nan_to_num is not None:
        metrics = {metric: np.nan_to_num(metric_value, nan=nan_to_num) for metric, metric_value in metrics.items()}

    return metrics
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class MeanIoU(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                # 1st Seq - height dim, 2nd - width dim
                {
                    "predictions": datasets.Sequence(datasets.Sequence(datasets.Value("uint16"))),
                    "references": datasets.Sequence(datasets.Sequence(datasets.Value("uint16"))),
                }
            ),
            reference_urls=[
                "https://github.com/open-mmlab/mmsegmentation/blob/71c201b1813267d78764f306a297ca717827c4bf/mmseg/core/evaluation/metrics.py"
            ],
        )

    def _compute(
        self,
        predictions,
        references,
        num_labels: int,
        ignore_index: int,
        nan_to_num: Optional[int] = None,
        label_map: Optional[Dict[int, int]] = None,
        reduce_labels: bool = False,
    ):
        iou_result = mean_iou(
            results=predictions,
            gt_seg_maps=references,
            num_labels=num_labels,
            ignore_index=ignore_index,
            nan_to_num=nan_to_num,
            label_map=label_map,
            reduce_labels=reduce_labels,
        )
        return iou_result
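# --- Illustrative usage (not part of the original metric file): a minimal sanity
# check of the helpers above; assumes this module's own `numpy` import (as `np`).
if __name__ == "__main__":
    pred = np.array([[1, 2], [3, 4]])
    gt = np.array([[1, 2], [3, 3]])
    # Labels 1 and 2 match exactly (IoU 1.0), label 3 overlaps half its union
    # (IoU 0.5), and label 4 never appears in the ground truth (IoU 0.0).
    print(mean_iou(results=[pred], gt_seg_maps=[gt], num_labels=5, ignore_index=255))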
| 700
|
from .data_collator import (
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
    DataCollatorForSeq2Seq,
DataCollatorForSOP,
DataCollatorForTokenClassification,
DataCollatorForWholeWordMask,
DataCollatorWithPadding,
DefaultDataCollator,
default_data_collator,
)
from .metrics import glue_compute_metrics, xnli_compute_metrics
from .processors import (
DataProcessor,
InputExample,
InputFeatures,
SingleSentenceClassificationProcessor,
SquadExample,
SquadFeatures,
    SquadV1Processor,
    SquadV2Processor,
glue_convert_examples_to_features,
glue_output_modes,
glue_processors,
glue_tasks_num_labels,
squad_convert_examples_to_features,
xnli_output_modes,
xnli_processors,
xnli_tasks_num_labels,
)
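# --- Illustrative usage (not part of the original module): `default_data_collator`,
# re-exported above, stacks a list of feature dicts into batched tensors and renames
# "label" to "labels"; a minimal sketch:
# from transformers.data import default_data_collator
# batch = default_data_collator(
#     [{"input_ids": [1, 2, 3], "label": 0}, {"input_ids": [4, 5, 6], "label": 1}]
# )
# assert batch["input_ids"].shape == (2, 3) and batch["labels"].shape == (2,)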
| 234
| 0
|
'''simple docstring'''
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


class SpeechToImagePipeline(DiffusionPipeline):
    def __init__(
        self,
        speech_model: WhisperForConditionalGeneration,
        speech_processor: WhisperProcessor,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
    ):
        super().__init__()

        if safety_checker is None:
            logger.warning(
                f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
                " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
                " results in services or applications open to the public. Both the diffusers team and Hugging Face"
                " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
                " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
                " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
            )

        self.register_modules(
            speech_model=speech_model,
            speech_processor=speech_processor,
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            feature_extractor=feature_extractor,
        )

    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)

    @torch.no_grad()
    def __call__(
        self,
        audio,
        sampling_rate=16_000,
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        **kwargs,
    ):
        inputs = self.speech_processor.feature_extractor(
            audio, return_tensors="pt", sampling_rate=sampling_rate
        ).input_features.to(self.device)
        predicted_ids = self.speech_model.generate(inputs, max_length=480_000)

        prompt = self.speech_processor.tokenizer.batch_decode(predicted_ids, skip_special_tokens=True, normalize=True)[
            0
        ]

        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )

        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            return_tensors="pt",
        )
        text_input_ids = text_inputs.input_ids

        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}"
            )
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
        text_embeddings = self.text_encoder(text_input_ids.to(self.device))[0]

        # duplicate text embeddings for each generation per prompt, using mps friendly method
        bs_embed, seq_len, _ = text_embeddings.shape
        text_embeddings = text_embeddings.repeat(1, num_images_per_prompt, 1)
        text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1)

        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0
        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [""] * batch_size
            elif type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}."
                )
            elif isinstance(negative_prompt, str):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`."
                )
            else:
                uncond_tokens = negative_prompt

            max_length = text_input_ids.shape[-1]
            uncond_input = self.tokenizer(
                uncond_tokens,
                padding="max_length",
                max_length=max_length,
                truncation=True,
                return_tensors="pt",
            )
            uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = uncond_embeddings.shape[1]
            uncond_embeddings = uncond_embeddings.repeat(1, num_images_per_prompt, 1)
            uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt, seq_len, -1)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            text_embeddings = torch.cat([uncond_embeddings, text_embeddings])

        # get the initial random noise unless the user supplied it
        # Unlike in other pipelines, latents need to be generated in the target device
        # for 1-to-1 results reproducibility with the CompVis implementation.
        # However this currently doesn't work in `mps`.
        latents_shape = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
        latents_dtype = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not exist on mps
                latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to(
                    self.device
                )
            else:
                latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
        else:
            if latents.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            latents = latents.to(self.device)

        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps)

        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        timesteps_tensor = self.scheduler.timesteps.to(self.device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            # predict the noise residual
            noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample

            # perform guidance
            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, t, latents)

        latents = 1 / 0.18215 * latents
        image = self.vae.decode(latents).sample

        image = (image / 2 + 0.5).clamp(0, 1)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return image

        return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=None)
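# --- Illustrative usage (not part of the original file): a hedged sketch of loading
# this community pipeline; the checkpoint ids below are assumptions picked for
# illustration (any Whisper + Stable Diffusion v1.x checkpoints should work).
# import torch
# from datasets import load_dataset
#
# pipe = SpeechToImagePipeline.from_pretrained(
#     "CompVis/stable-diffusion-v1-4",
#     custom_pipeline="speech_to_image_diffusion",
#     speech_model=WhisperForConditionalGeneration.from_pretrained("openai/whisper-small"),
#     speech_processor=WhisperProcessor.from_pretrained("openai/whisper-small"),
#     torch_dtype=torch.float16,
# ).to("cuda")
# sample = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")[0]["audio"]
# image = pipe(sample["array"], sampling_rate=sample["sampling_rate"]).images[0]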
| 692
|
"""simple docstring"""
import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch
import pyarrow as pa
import pytest
import requests
from packaging import version
from datasets import config
if config.PY_VERSION < version.parse('3.8'):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
def parse_flag_from_env(key, default=False):
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f"If set, {key} must be yes or no.")
    return _value


_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
_run_remote_tests = parse_flag_from_env("RUN_REMOTE", default=False)
_run_local_tests = parse_flag_from_env("RUN_LOCAL", default=True)
_run_packaged_tests = parse_flag_from_env("RUN_PACKAGED", default=True)
# Compression
require_lz4 = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason="test requires lz4")
require_py7zr = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason="test requires py7zr")
require_zstandard = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason="test requires zstandard")

# Audio
require_sndfile = pytest.mark.skipif(
    # On Windows and OS X, soundfile installs sndfile
    find_spec("soundfile") is None or version.parse(importlib_metadata.version("soundfile")) < version.parse("0.12.0"),
    reason="test requires sndfile>=0.12.1: 'pip install \"soundfile>=0.12.1\"'; ",
)

# Beam
require_beam = pytest.mark.skipif(
    not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse("0.3.2"),
    reason="test requires apache-beam and a compatible dill version",
)

# Dill-cloudpickle compatibility
require_dill_gt_0_3_2 = pytest.mark.skipif(
    config.DILL_VERSION <= version.parse("0.3.2"),
    reason="test requires dill>0.3.2 for cloudpickle compatibility",
)

# Windows
require_not_windows = pytest.mark.skipif(
    sys.platform == "win32",
    reason="test should not be run on Windows",
)
def require_faiss(test_case):
    try:
        import faiss  # noqa
    except ImportError:
        test_case = unittest.skip("test requires faiss")(test_case)
    return test_case


def require_regex(test_case):
    try:
        import regex  # noqa
    except ImportError:
        test_case = unittest.skip("test requires regex")(test_case)
    return test_case


def require_elasticsearch(test_case):
    try:
        import elasticsearch  # noqa
    except ImportError:
        test_case = unittest.skip("test requires elasticsearch")(test_case)
    return test_case


def require_sqlalchemy(test_case):
    try:
        import sqlalchemy  # noqa
    except ImportError:
        test_case = unittest.skip("test requires sqlalchemy")(test_case)
    return test_case


def require_torch(test_case):
    if not config.TORCH_AVAILABLE:
        test_case = unittest.skip("test requires PyTorch")(test_case)
    return test_case


def require_tf(test_case):
    if not config.TF_AVAILABLE:
        test_case = unittest.skip("test requires TensorFlow")(test_case)
    return test_case


def require_jax(test_case):
    if not config.JAX_AVAILABLE:
        test_case = unittest.skip("test requires JAX")(test_case)
    return test_case


def require_pil(test_case):
    if not config.PIL_AVAILABLE:
        test_case = unittest.skip("test requires Pillow")(test_case)
    return test_case


def require_transformers(test_case):
    try:
        import transformers  # noqa F401
    except ImportError:
        return unittest.skip("test requires transformers")(test_case)
    else:
        return test_case


def require_tiktoken(test_case):
    try:
        import tiktoken  # noqa F401
    except ImportError:
        return unittest.skip("test requires tiktoken")(test_case)
    else:
        return test_case


def require_spacy(test_case):
    try:
        import spacy  # noqa F401
    except ImportError:
        return unittest.skip("test requires spacy")(test_case)
    else:
        return test_case


def require_spacy_model(model):
    def _require_spacy_model(test_case):
        try:
            import spacy  # noqa F401

            spacy.load(model)
        except ImportError:
            return unittest.skip("test requires spacy")(test_case)
        except OSError:
            return unittest.skip("test requires spacy model '{}'".format(model))(test_case)
        else:
            return test_case

    return _require_spacy_model


def require_pyspark(test_case):
    try:
        import pyspark  # noqa F401
    except ImportError:
        return unittest.skip("test requires pyspark")(test_case)
    else:
        return test_case


def require_joblibspark(test_case):
    try:
        import joblibspark  # noqa F401
    except ImportError:
        return unittest.skip("test requires joblibspark")(test_case)
    else:
        return test_case


def slow(test_case):
    if not _run_slow_tests or _run_slow_tests == 0:
        test_case = unittest.skip("test is slow")(test_case)
    return test_case


def local(test_case):
    if not _run_local_tests or _run_local_tests == 0:
        test_case = unittest.skip("test is local")(test_case)
    return test_case


def packaged(test_case):
    if not _run_packaged_tests or _run_packaged_tests == 0:
        test_case = unittest.skip("test is packaged")(test_case)
    return test_case


def remote(test_case):
    if not _run_remote_tests or _run_remote_tests == 0:
        test_case = unittest.skip("test requires remote")(test_case)
    return test_case


def for_all_test_methods(*decorators):
    def decorate(cls):
        for name, fn in cls.__dict__.items():
            if callable(fn) and name.startswith("test"):
                for decorator in decorators:
                    fn = decorator(fn)
                setattr(cls, name, fn)
        return cls

    return decorate
class RequestWouldHangIndefinitelyError(Exception):
    pass


class OfflineSimulationMode(Enum):
    CONNECTION_FAILS = 0
    CONNECTION_TIMES_OUT = 1
    HF_DATASETS_OFFLINE_SET_TO_1 = 2
@contextmanager
def offline(mode=OfflineSimulationMode.CONNECTION_FAILS, timeout=1e-16):
    online_request = requests.Session().request

    def timeout_request(session, method, url, **kwargs):
        # Change the url to an invalid url so that the connection hangs
        invalid_url = "https://10.255.255.1"
        if kwargs.get("timeout") is None:
            raise RequestWouldHangIndefinitelyError(
                f"Tried a call to {url} in offline mode with no timeout set. Please set a timeout."
            )
        kwargs["timeout"] = timeout
        try:
            return online_request(method, invalid_url, **kwargs)
        except Exception as e:
            # The following changes in the error are just here to make the offline timeout error prettier
            e.request.url = url
            max_retry_error = e.args[0]
            max_retry_error.args = (max_retry_error.args[0].replace("10.255.255.1", f"OfflineMock[{url}]"),)
            e.args = (max_retry_error,)
            raise

    def raise_connection_error(session, prepared_request, **kwargs):
        raise requests.ConnectionError("Offline mode is enabled.", request=prepared_request)

    if mode is OfflineSimulationMode.CONNECTION_FAILS:
        with patch("requests.Session.send", raise_connection_error):
            yield
    elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
        # inspired from https://stackoverflow.com/a/904609
        with patch("requests.Session.request", timeout_request):
            yield
    elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
        with patch("datasets.config.HF_DATASETS_OFFLINE", True):
            yield
    else:
        raise ValueError("Please use a value from the OfflineSimulationMode enum.")
@contextmanager
def set_current_working_directory_to_temp_dir(*args, **kwargs):
    original_working_dir = str(Path().resolve())
    with tempfile.TemporaryDirectory(*args, **kwargs) as tmp_dir:
        try:
            os.chdir(tmp_dir)
            yield
        finally:
            os.chdir(original_working_dir)
@contextmanager
def assert_arrow_memory_increases():
    import gc

    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."


@contextmanager
def assert_arrow_memory_doesnt_increase():
    import gc

    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."


def is_rng_equal(rng1, rng2):
    return deepcopy(rng1).integers(0, 100, 10).tolist() == deepcopy(rng2).integers(0, 100, 10).tolist()
def xfail_if_500_502_http_error(func):
    import decorator
    from requests.exceptions import HTTPError

    def _wrapper(func, *args, **kwargs):
        try:
            return func(*args, **kwargs)
        except HTTPError as err:
            if str(err).startswith("500") or str(err).startswith("502"):
                pytest.xfail(str(err))
            raise err

    return decorator.decorator(_wrapper, func)
class _RunOutput:
    def __init__(self, returncode, stdout, stderr):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr


async def _read_stream(stream, callback):
    while True:
        line = await stream.readline()
        if line:
            callback(line)
        else:
            break
async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
    if echo:
        print("\nRunning: ", " ".join(cmd))

    p = await asyncio.create_subprocess_exec(
        cmd[0],
        *cmd[1:],
        stdin=stdin,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE,
        env=env,
    )

    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)

    out = []
    err = []

    def tee(line, sink, pipe, label=""):
        line = line.decode("utf-8").rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            _read_stream(p.stdout, lambda l: tee(l, out, sys.stdout, label="stdout:")),
            _read_stream(p.stderr, lambda l: tee(l, err, sys.stderr, label="stderr:")),
        ],
        timeout=timeout,
    )
    return _RunOutput(await p.wait(), out, err)
def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo)
    )

    cmd_str = " ".join(cmd)
    if result.returncode > 0:
        stderr = "\n".join(result.stderr)
        raise RuntimeError(
            f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
            f"The combined stderr from workers follows:\n{stderr}"
        )

    # check that the subprocess actually did run and produced some output, should the test rely on
    # the remote side to do the testing
    if not result.stdout and not result.stderr:
        raise RuntimeError(f"'{cmd_str}' produced no output.")

    return result


def pytest_xdist_worker_id():
    """Return the numerical id of the current `pytest-xdist` worker, or 0 when not using `pytest-xdist`."""
    worker = os.environ.get("PYTEST_XDIST_WORKER", "gw0")
    worker = re.sub(r"^gw", "", worker, 0, re.M)
    return int(worker)


def get_torch_dist_unique_port():
    """Return a port number usable as `torch.distributed`'s `--master_port`, unique per xdist worker."""
    port = 29500
    uniq_delta = pytest_xdist_worker_id()
    return port + uniq_delta
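# --- Illustrative usage (not part of the original file): the `offline` simulator above
# patches `requests` so network access fails deterministically inside the block.
# with offline(OfflineSimulationMode.CONNECTION_FAILS):
#     requests.get("https://huggingface.co")  # raises requests.ConnectionError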
| 449
| 0
|
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional
import pyarrow as pa
import pyarrow.json as paj
import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline
logger = datasets.utils.logging.get_logger(__name__)


@dataclass
class JsonConfig(datasets.BuilderConfig):
    """BuilderConfig for JSON."""

    features: Optional[datasets.Features] = None
    encoding: str = "utf-8"
    encoding_errors: Optional[str] = None
    field: Optional[str] = None
    use_threads: bool = True  # deprecated
    block_size: Optional[int] = None  # deprecated
    chunksize: int = 10 << 20  # 10MB
    newlines_in_values: Optional[bool] = None


class Json(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = JsonConfig

    def _info(self):
        if self.config.block_size is not None:
            logger.warning("The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead")
            self.config.chunksize = self.config.block_size
        if self.config.use_threads is not True:
            logger.warning(
                "The JSON loader parameter `use_threads` is deprecated and doesn't have any effect anymore."
            )
        if self.config.newlines_in_values is not None:
            raise ValueError("The JSON loader parameter `newlines_in_values` is no longer supported")
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in datafiles"""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            # adding missing columns
            for column_name in set(self.config.features) - set(pa_table.column_names):
                type = self.config.features.arrow_schema.field(column_name).type
                pa_table = pa_table.append_column(column_name, pa.array([None] * len(pa_table), type=type))
            # more expensive cast to support nested structures with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.config.features.arrow_schema)
        return pa_table

    def _generate_tables(self, files):
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            # If the file is one json object and if we need to look at the list of items in one specific field
            if self.config.field is not None:
                with open(file, encoding=self.config.encoding, errors=self.config.encoding_errors) as f:
                    dataset = json.load(f)
                # We keep only the field we are interested in
                dataset = dataset[self.config.field]
                # We accept two format: a list of dicts or a dict of lists
                if isinstance(dataset, (list, tuple)):
                    keys = set().union(*[row.keys() for row in dataset])
                    mapping = {col: [row.get(col) for row in dataset] for col in keys}
                else:
                    mapping = dataset
                pa_table = pa.Table.from_pydict(mapping)
                yield file_idx, self._cast_table(pa_table)
            # If the file has one json object per line
            else:
                with open(file, "rb") as f:
                    batch_idx = 0
                    # Use block_size equal to the chunk size divided by 32 to leverage multithreading
                    # Set a default minimum value of 16kB if the chunk size is really small
                    block_size = max(self.config.chunksize // 32, 16 << 10)
                    encoding_errors = (
                        self.config.encoding_errors if self.config.encoding_errors is not None else "strict"
                    )
                    while True:
                        batch = f.read(self.config.chunksize)
                        if not batch:
                            break
                        # Finish current line
                        try:
                            batch += f.readline()
                        except (AttributeError, io.UnsupportedOperation):
                            batch += readline(f)
                        # PyArrow only accepts utf-8 encoded bytes
                        if self.config.encoding != "utf-8":
                            batch = batch.decode(self.config.encoding, errors=encoding_errors).encode("utf-8")
                        try:
                            while True:
                                try:
                                    pa_table = paj.read_json(
                                        io.BytesIO(batch), read_options=paj.ReadOptions(block_size=block_size)
                                    )
                                    break
                                except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
                                    if (
                                        isinstance(e, pa.ArrowInvalid)
                                        and "straddling" not in str(e)
                                        or block_size > len(batch)
                                    ):
                                        raise
                                    else:
                                        # Increase the block size in case it was too small.
                                        # The block size will be reset for the next file.
                                        logger.debug(
                                            f"Batch of {len(batch)} bytes couldn't be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}."
                                        )
                                        block_size *= 2
                        except pa.ArrowInvalid as e:
                            try:
                                with open(
                                    file, encoding=self.config.encoding, errors=self.config.encoding_errors
                                ) as f:
                                    dataset = json.load(f)
                            except json.JSONDecodeError:
                                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                raise e
                            # If possible, parse the file as a list of json objects and exit the loop
                            if isinstance(dataset, list):  # list is the only sequence type supported in JSON
                                try:
                                    keys = set().union(*[row.keys() for row in dataset])
                                    mapping = {col: [row.get(col) for row in dataset] for col in keys}
                                    pa_table = pa.Table.from_pydict(mapping)
                                except (pa.ArrowInvalid, AttributeError) as e:
                                    logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                    raise ValueError(f"Not able to read records in the JSON file at {file}.") from None
                                yield file_idx, self._cast_table(pa_table)
                                break
                            else:
                                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                raise ValueError(
                                    f"Not able to read records in the JSON file at {file}. "
                                    f"You should probably indicate the field of the JSON file containing your records. "
                                    f"This JSON file contain the following fields: {str(list(dataset.keys()))}. "
                                    f"Select the correct one and provide it as `field='XXX'` to the dataset loading method. "
                                ) from None
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield (file_idx, batch_idx), self._cast_table(pa_table)
                        batch_idx += 1
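# --- Illustrative usage (not part of the original builder): the chunked parsing above
# ultimately relies on `pyarrow.json.read_json`; a minimal standalone sketch:
# import io
# import pyarrow.json as paj
# buf = b'{"a": 1}\n{"a": 2}\n'
# table = paj.read_json(io.BytesIO(buf), read_options=paj.ReadOptions(block_size=16 << 10))
# assert table.num_rows == 2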
| 177
|
from __future__ import annotations
import unittest
from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.xglm.modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
)
@require_tf
class TFXGLMModelTester:
    config_cls = XGLMConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=14,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_labels=True,
        vocab_size=99,
        d_model=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        ffn_dim=37,
        activation_function="gelu",
        activation_dropout=0.1,
        attention_dropout=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = d_model
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.ffn_dim = ffn_dim
        self.activation_function = activation_function
        self.activation_dropout = activation_dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = None
        self.bos_token_id = 0
        self.eos_token_id = 2
        self.pad_token_id = 1

    def get_large_model_config(self):
        return XGLMConfig.from_pretrained("facebook/xglm-564M")

    def prepare_config_and_inputs(self):
        input_ids = tf.clip_by_value(
            ids_tensor([self.batch_size, self.seq_length], self.vocab_size), clip_value_min=0, clip_value_max=3
        )

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        config = self.get_config()

        head_mask = floats_tensor([self.num_hidden_layers, self.num_attention_heads], 2)

        return (
            config,
            input_ids,
            input_mask,
            head_mask,
        )

    def get_config(self):
        return XGLMConfig(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            num_layers=self.num_hidden_layers,
            attention_heads=self.num_attention_heads,
            ffn_dim=self.ffn_dim,
            activation_function=self.activation_function,
            activation_dropout=self.activation_dropout,
            attention_dropout=self.attention_dropout,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            use_cache=True,
            bos_token_id=self.bos_token_id,
            eos_token_id=self.eos_token_id,
            pad_token_id=self.pad_token_id,
            return_dict=True,
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()

        (
            config,
            input_ids,
            input_mask,
            head_mask,
        ) = config_and_inputs

        inputs_dict = {
            "input_ids": input_ids,
            "head_mask": head_mask,
        }

        return config, inputs_dict


@require_tf
class TFXGLMModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
    all_generative_model_classes = (TFXGLMForCausalLM,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFXGLMModel, "text-generation": TFXGLMForCausalLM} if is_tf_available() else {}
    )
    test_onnx = False
    test_missing_keys = False
    test_pruning = False

    def setUp(self):
        self.model_tester = TFXGLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=XGLMConfig, n_embd=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFXGLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="Currently, model embeddings are going to undergo a major refactor.")
    def test_resize_token_embeddings(self):
        super().test_resize_token_embeddings()


@require_tf
class TFXGLMModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_xglm(self, verify_outputs=True):
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")
        input_ids = tf.convert_to_tensor([[2, 268, 9865]], dtype=tf.int32)  # The dog
        # </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
        # fmt: off
        expected_output_ids = [2, 268, 9865, 67, 11, 1988, 57252, 9865, 5, 984, 67, 1988, 213838, 1658, 53, 70446, 33, 6657, 278, 1581]
        # fmt: on
        output_ids = model.generate(input_ids, do_sample=False, num_beams=1)
        if verify_outputs:
            self.assertListEqual(output_ids[0].numpy().tolist(), expected_output_ids)

    @slow
    def test_xglm_sample(self):
        tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")

        tf.random.set_seed(0)
        tokenized = tokenizer("Today is a nice day and", return_tensors="tf")
        input_ids = tokenized.input_ids
        # forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices)
        with tf.device(":/CPU:0"):
            output_ids = model.generate(input_ids, do_sample=True, seed=[7, 0])
        output_str = tokenizer.decode(output_ids[0], skip_special_tokens=True)

        EXPECTED_OUTPUT_STR = (
            "Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due"
        )
        self.assertEqual(output_str, EXPECTED_OUTPUT_STR)

    @slow
    def test_batch_generation(self):
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")
        tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")

        tokenizer.padding_side = "left"

        # use different length sentences to test batching
        sentences = [
            "This is an extremelly long sentence that only exists to test the ability of the model to cope with "
            "left-padding, such as in batched generation. The output for the sequence below should be the same "
            "regardless of whether left padding is applied or not. When",
            "Hello, my dog is a little",
        ]

        inputs = tokenizer(sentences, return_tensors="tf", padding=True)
        input_ids = inputs["input_ids"]

        outputs = model.generate(input_ids=input_ids, attention_mask=inputs["attention_mask"], max_new_tokens=12)

        inputs_non_padded = tokenizer(sentences[0], return_tensors="tf").input_ids
        output_non_padded = model.generate(input_ids=inputs_non_padded, max_new_tokens=12)

        inputs_padded = tokenizer(sentences[1], return_tensors="tf").input_ids
        output_padded = model.generate(input_ids=inputs_padded, max_new_tokens=12)

        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)

        expected_output_sentence = [
            "This is an extremelly long sentence that only exists to test the ability of the model to cope with "
            "left-padding, such as in batched generation. The output for the sequence below should be the same "
            "regardless of whether left padding is applied or not. When left padding is applied, the sequence will be "
            "a single",
            "Hello, my dog is a little bit of a shy one, but he is very friendly",
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence])
| 1
|
"""simple docstring"""
from math import ceil
def solution(n: int = 1001) -> int:
    """Returns the sum of the numbers on the diagonals of an n by n spiral grid."""
    total = 1
    for i in range(1, int(ceil(n / 2.0))):
        odd = 2 * i + 1
        even = 2 * i
        total = total + 4 * odd**2 - 6 * even
    return total


if __name__ == "__main__":
    import sys

    if len(sys.argv) == 1:
        print(solution())
    else:
        try:
            n = int(sys.argv[1])
            print(solution(n))
        except ValueError:
            print("Invalid entry - please enter a number")
| 77
|
def solution(limit: int = 28123) -> int:
    """
    Returns the sum of all positive integers that cannot be written as the sum of
    two abundant numbers (Project Euler problem 23).
    """
    sum_divs = [1] * (limit + 1)

    for i in range(2, int(limit**0.5) + 1):
        sum_divs[i * i] += i
        for k in range(i + 1, limit // i + 1):
            sum_divs[k * i] += k + i

    abundants = set()
    res = 0

    for n in range(1, limit + 1):
        if sum_divs[n] > n:
            abundants.add(n)

        if not any((n - a in abundants) for a in abundants):
            res += n

    return res
if __name__ == "__main__":
print(solution())
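# --- Quick check (not part of the original file): 12 is the smallest abundant number
# (1 + 2 + 3 + 4 + 6 = 16 > 12), so 24 is the smallest integer expressible as the sum
# of two abundant numbers; every integer below 24 is counted in the result.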
| 648
| 0
|
"""simple docstring"""
values = {
    0: "0",
    1: "1",
    2: "2",
    3: "3",
    4: "4",
    5: "5",
    6: "6",
    7: "7",
    8: "8",
    9: "9",
    10: "a",
    11: "b",
    12: "c",
    13: "d",
    14: "e",
    15: "f",
}
def decimal_to_hexadecimal(decimal: float) -> str:
    """Take an integer-valued decimal number and return its hexadecimal representation as a str beginning with 0x."""
    assert type(decimal) in (int, float) and decimal == int(decimal)
    decimal = int(decimal)
    hexadecimal = ""
    negative = False
    if decimal < 0:
        negative = True
        decimal *= -1
    while decimal > 0:
        decimal, remainder = divmod(decimal, 16)
        hexadecimal = values[remainder] + hexadecimal
    hexadecimal = "0x" + hexadecimal
    if negative:
        hexadecimal = "-" + hexadecimal
    return hexadecimal
if __name__ == "__main__":
import doctest
doctest.testmod()
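# --- Quick check (not part of the original file): each hex digit encodes four bits,
# so 255 = 15 * 16 + 15 maps to "0xff" and -256 maps to "-0x100".
# assert decimal_to_hexadecimal(255) == "0xff"
# assert decimal_to_hexadecimal(-256) == "-0x100"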
| 718
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_mluke"] = ["MLukeTokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mluke import MLukeTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 553
| 0
|
"""simple docstring"""
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
_unmatched = object()

# For specifying empty leaf dict `{}`
empty_dict = object()


def _match(qs, ks):
    """Return True if regexes in qs match any window of strings in tuple ks."""
    # compile regexes and force complete match
    qts = tuple((re.compile(x + "$") for x in qs))
    # Windows of ks with size len(qs)
    for i in range(len(ks) - len(qs) + 1):
        matches = [x.match(y) for x, y in zip(qts, ks[i:])]
        if matches and all(matches):
            return True
    return False


def _replacement_rules(rules):
    def replace(key, val):
        for rule, replacement in rules:
            if _match(rule, key):
                return replacement
        return val

    return replace


def _get_partition_rules():
    return [
        # embeddings
        (("transformer", "wpe", "embedding"), P("mp", None)),
        (("transformer", "wte", "embedding"), P("mp", None)),
        # attention
        (("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(None, "mp")),
        (("attention", "out_proj", "kernel"), P("mp", None)),
        (("attention", "out_proj", "bias"), None),
        # mlp
        (("mlp", "c_fc", "kernel"), P(None, "mp")),
        (("mlp", "c_fc", "bias"), P("mp")),
        (("mlp", "c_proj", "kernel"), P("mp", None)),
        (("mlp", "c_proj", "bias"), None),
        # layer norms
        ((r"ln_\d+", "bias"), None),
        ((r"\d+", r"ln_\d+", "scale"), None),
        (("ln_f", "bias"), None),
        (("ln_f", "scale"), None),
    ]


def set_partitions(in_dict):
    rules = _get_partition_rules()
    replace = _replacement_rules(rules)
    initd = {k: _unmatched for k in flatten_dict(in_dict)}
    result = {k: replace(k, v) for k, v in initd.items()}
    assert _unmatched not in result.values(), "Incomplete partition spec."
    return freeze(unflatten_dict(result))
| 58
|
"""simple docstring"""
from __future__ import absolute_import, division, print_function, unicode_literals
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers import RobertaConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.roberta.modeling_roberta import (
ROBERTA_INPUTS_DOCSTRING,
ROBERTA_START_DOCSTRING,
RobertaEmbeddings,
)
from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy
@add_start_docstrings(
    "The RoBERTa Model transformer with early exiting (DeeRoBERTa). ",
    ROBERTA_START_DOCSTRING,
)
class DeeRobertaModel(DeeBertModel):
    config_class = RobertaConfig
    base_model_prefix = "roberta"

    def __init__(self, config):
        super().__init__(config)

        self.embeddings = RobertaEmbeddings(config)
        self.init_weights()


@add_start_docstrings(
    """RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,
    also takes care of multi-layer training. """,
    ROBERTA_START_DOCSTRING,
)
class DeeRobertaForSequenceClassification(BertPreTrainedModel):
    config_class = RobertaConfig
    base_model_prefix = "roberta"

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.num_layers = config.num_hidden_layers

        self.roberta = DeeRobertaModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)

    @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_layer=-1,
        train_highway=False,
    ):
        exit_layer = self.num_layers
        try:
            outputs = self.roberta(
                input_ids,
                attention_mask=attention_mask,
                token_type_ids=token_type_ids,
                position_ids=position_ids,
                head_mask=head_mask,
                inputs_embeds=inputs_embeds,
            )

            pooled_output = outputs[1]

            pooled_output = self.dropout(pooled_output)
            logits = self.classifier(pooled_output)
            outputs = (logits,) + outputs[2:]  # add hidden states and attention if they are here
        except HighwayException as e:
            outputs = e.message
            exit_layer = e.exit_layer
            logits = outputs[0]

        if not self.training:
            original_entropy = entropy(logits)
            highway_entropy = []
            highway_logits_all = []
        if labels is not None:
            if self.num_labels == 1:
                # We are doing regression
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

            # work with highway exits
            highway_losses = []
            for highway_exit in outputs[-1]:
                highway_logits = highway_exit[0]
                if not self.training:
                    highway_logits_all.append(highway_logits)
                    highway_entropy.append(highway_exit[2])
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    highway_loss = loss_fct(highway_logits.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    highway_loss = loss_fct(highway_logits.view(-1, self.num_labels), labels.view(-1))
                highway_losses.append(highway_loss)

            if train_highway:
                outputs = (sum(highway_losses[:-1]),) + outputs
                # exclude the final highway, of course
            else:
                outputs = (loss,) + outputs

        if not self.training:
            outputs = outputs + ((original_entropy, highway_entropy), exit_layer)
            if output_layer >= 0:
                outputs = (
                    (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
                )  # use the highway of the last layer

        return outputs  # (loss), logits, (hidden_states), (attentions), entropy
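# --- Illustrative usage (not part of the original file): a hedged sketch of DeeBERT's
# two training phases as exposed by the forward signature above; the tensors are
# assumed to be prepared elsewhere.
# outputs = model(input_ids, labels=labels)                             # (loss, logits, ...): full-stack phase
# highway_outputs = model(input_ids, labels=labels, train_highway=True) # trains the early-exit heads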
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SPEECH_TO_TEXT_2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/s2t-wav2vec2-large-en-de": (
        "https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json"
    ),
    # See all Speech2Text models at https://huggingface.co/models?filter=speech2text2
}


class Speech2Text2Config(PretrainedConfig):
    model_type = "speech_to_text_2"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "decoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=10000,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=4,
        decoder_layerdrop=0.0,
        use_cache=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=2,
        scale_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        max_target_positions=1024,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = decoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_target_positions = max_target_positions

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
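# --- Illustrative usage (not part of the original file): the defaults above mirror
# the decoder of facebook/s2t-wav2vec2-large-en-de; `attribute_map` aliases
# `hidden_size` to `d_model`, so for example:
# config = Speech2Text2Config()
# assert config.hidden_size == config.d_model == 256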
| 709
|
import argparse
from typing import List
import evaluate
import numpy as np
import torch
from datasets import DatasetDict, load_dataset
# New Code #
# We'll be using StratifiedKFold for this example
from sklearn.model_selection import StratifiedKFold
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to perform Cross Validation,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
__lowerCamelCase = 16
__lowerCamelCase = 32
def get_fold_dataloaders(accelerator, dataset, train_idxs, valid_idxs, batch_size=16):
    """Gets a set of train, valid, and test dataloaders for a particular fold of the dataset."""
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = DatasetDict(
        {
            "train": dataset["train"].select(train_idxs),
            "validation": dataset["train"].select(valid_idxs),
            "test": dataset["validation"],
        }
    )

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )
    test_dataloader = DataLoader(
        tokenized_datasets["test"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader, test_dataloader
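
# Illustrative sketch (not part of the original script): `StratifiedKFold.split`
# yields pairs of index arrays whose label distribution matches in every fold.
# The toy labels below are hypothetical stand-ins for `datasets["train"]["label"]`.
def _sketch_stratified_folds():
    labels = np.array([0, 0, 0, 1, 1, 1])
    for train_idxs, valid_idxs in StratifiedKFold(n_splits=3).split(np.zeros(len(labels)), labels):
        print(train_idxs, valid_idxs)  # each validation fold holds one sample per class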
def training_function(config, args):
    # New Code #
    test_predictions = []
    # Download the dataset
    datasets = load_dataset("glue", "mrpc")
    # Create our splits
    kfold = StratifiedKFold(n_splits=int(args.num_folds))
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed)

    # New Code #
    # Create our folds:
    folds = kfold.split(np.zeros(datasets["train"].num_rows), datasets["train"]["label"])
    test_references = []
    # Iterate over them
    for i, (train_idxs, valid_idxs) in enumerate(folds):
        train_dataloader, eval_dataloader, test_dataloader = get_fold_dataloaders(
            accelerator,
            datasets,
            train_idxs,
            valid_idxs,
        )
        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device)

        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr)

        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=100,
            num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
        )

        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
        )

        # Now we train the model
        for epoch in range(num_epochs):
            model.train()
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                outputs = model(**batch)
                loss = outputs.loss
                loss = loss / gradient_accumulation_steps
                accelerator.backward(loss)
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()

            model.eval()
            for step, batch in enumerate(eval_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                with torch.no_grad():
                    outputs = model(**batch)
                predictions = outputs.logits.argmax(dim=-1)
                predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
                metric.add_batch(
                    predictions=predictions,
                    references=references,
                )

            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f"epoch {epoch}:", eval_metric)

        # New Code #
        # We also run predictions on the test set at the very end
        fold_predictions = []
        for step, batch in enumerate(test_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            fold_predictions.append(predictions.cpu())
            if i == 0:
                # We need all of the test predictions
                test_references.append(references.cpu())

        # Use accelerator.print to print only on the main process.
        test_predictions.append(torch.cat(fold_predictions, dim=0))
    # We now need to release all our memory and get rid of the current model, optimizer, etc
    accelerator.free_memory()
    # New Code #
    # Finally we check the accuracy of our folded results:
    test_references = torch.cat(test_references, dim=0)
    preds = torch.stack(test_predictions, dim=0).sum(dim=0).div(int(args.num_folds)).argmax(dim=-1)
    test_metric = metric.compute(predictions=preds, references=test_references)
    accelerator.print("Average test metrics from all folds:", test_metric)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    # New Code #
    parser.add_argument("--num_folds", type=int, default=3, help="The number of splits to perform across the dataset")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
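
# Illustrative usage (an assumption, following the accelerate examples README;
# the filename is hypothetical):
#   python cross_validation.py --num_folds 5
#   accelerate launch cross_validation.py --mixed_precision fp16 --num_folds 5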
import argparse
import logging
import sys
from unittest.mock import patch
import run_glue_deebert
from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()


def get_setup_file():
    parser = argparse.ArgumentParser()
    parser.add_argument("-f")
    args = parser.parse_args()
    return args.f
class DeeBertTests(TestCasePlus):
    def setup(self) -> None:
        stream_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(stream_handler)

    def run_and_check(self, args):
        n_gpu = get_gpu_count()

        if n_gpu > 1:
            pass
            # XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560
            # script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py"
            # distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split()
            # cmd = [sys.executable] + distributed_args + args
            # execute_subprocess_async(cmd, env=self.get_env())
            # XXX: test the results - need to save them first into .json file
        else:
            args.insert(0, "run_glue_deebert.py")
            with patch.object(sys, "argv", args):
                result = run_glue_deebert.main()
                for value in result.values():
                    self.assertGreaterEqual(value, 0.666)
    @slow
    @require_torch_non_multi_gpu
    def test_glue_deebert_train(self):
        train_args = """
            --model_type roberta
            --model_name_or_path roberta-base
            --task_name MRPC
            --do_train
            --do_eval
            --do_lower_case
            --data_dir ./tests/fixtures/tests_samples/MRPC/
            --max_seq_length 128
            --per_gpu_eval_batch_size=1
            --per_gpu_train_batch_size=8
            --learning_rate 2e-4
            --num_train_epochs 3
            --overwrite_output_dir
            --seed 42
            --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
            --plot_data_dir ./examples/deebert/results/
            --save_steps 0
            --overwrite_cache
            --eval_after_first_stage
            """.split()
        self.run_and_check(train_args)

        eval_args = """
            --model_type roberta
            --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
            --task_name MRPC
            --do_eval
            --do_lower_case
            --data_dir ./tests/fixtures/tests_samples/MRPC/
            --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
            --plot_data_dir ./examples/deebert/results/
            --max_seq_length 128
            --eval_each_highway
            --eval_highway
            --overwrite_cache
            --per_gpu_eval_batch_size=1
            """.split()
        self.run_and_check(eval_args)

        entropy_eval_args = """
            --model_type roberta
            --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
            --task_name MRPC
            --do_eval
            --do_lower_case
            --data_dir ./tests/fixtures/tests_samples/MRPC/
            --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
            --plot_data_dir ./examples/deebert/results/
            --max_seq_length 128
            --early_exit_entropy 0.1
            --eval_highway
            --overwrite_cache
            --per_gpu_eval_batch_size=1
            """.split()
        self.run_and_check(entropy_eval_args)
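
# Illustrative sketch (not part of the original tests): the `patch.object(sys,
# "argv", ...)` trick used above lets a test drive any argparse-based `main()`
# in-process, without spawning a subprocess.
def _sketch_argv_patching():
    import argparse

    def toy_main():
        parser = argparse.ArgumentParser()
        parser.add_argument("--lr", type=float)
        return parser.parse_args().lr

    with patch.object(sys, "argv", ["toy.py", "--lr", "0.01"]):
        assert toy_main() == 0.01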
import numpy as np
from cv2 import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filter2D, imread, imshow, waitKey
def gabor_filter_kernel(
    ksize: int, sigma: int, theta: int, lambd: int, gamma: int, psi: int
) -> np.ndarray:
    """
    :param ksize:  The kernel size (ksize x ksize); has to be odd
    :param sigma:  standard deviation of the gaussian bell curve
    :param theta:  The orientation of the normal to the parallel stripes of the Gabor function
    :param lambd:  Wavelength of the sinusoidal component
    :param gamma:  The spatial aspect ratio (ellipticity of the support)
    :param psi:    The phase offset of the sinusoidal function
    """
    # prepare kernel
    # the kernel size have to be odd
    if (ksize % 2) == 0:
        ksize = ksize + 1
    gabor = np.zeros((ksize, ksize), dtype=np.float64)

    # each value
    for y in range(ksize):
        for x in range(ksize):
            # distance from center
            px = x - ksize // 2
            py = y - ksize // 2

            # degree to radiant
            _theta = theta / 180 * np.pi
            cos_theta = np.cos(_theta)
            sin_theta = np.sin(_theta)

            # get kernel x
            _x = cos_theta * px + sin_theta * py
            # get kernel y
            _y = -sin_theta * px + cos_theta * py

            # fill kernel
            gabor[y, x] = np.exp(-(_x**2 + gamma**2 * _y**2) / (2 * sigma**2)) * np.cos(
                2 * np.pi * _x / lambd + psi
            )

    return gabor
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # read original image
    img = imread("../image_data/lena.jpg")
    # turn image in gray scale value
    gray = cvtColor(img, COLOR_BGR2GRAY)

    # Apply multiple Kernel to detect edges
    out = np.zeros(gray.shape[:2])
    for theta in [0, 30, 60, 90, 120, 150]:
        kernel_10 = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
        out += filter2D(gray, CV_8UC3, kernel_10)
    out = out / out.max() * 255
    out = out.astype(np.uint8)

    imshow("Original", gray)
    imshow("Gabor filter with 20x20 mask and 6 directions", out)

    waitKey(0)
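
# Illustrative check (not part of the original script): with theta = 0 and psi = 0
# both factors of the kernel are even in the horizontal offset, so the kernel is
# mirror-symmetric left-to-right.
def _sketch_kernel_symmetry():
    kernel = gabor_filter_kernel(9, 8, theta=0, lambd=10, gamma=0, psi=0)
    assert np.allclose(kernel, np.fliplr(kernel))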
import inspect
from typing import Callable, List, Optional, Union

import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer

from diffusers import DiffusionPipeline
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import logging


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class SeedResizeStableDiffusionPipeline(DiffusionPipeline):
    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
    ):
        super().__init__()
        self.register_modules(
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            safety_checker=safety_checker,
            feature_extractor=feature_extractor,
        )

    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        # set slice_size = `None` to disable attention slicing
        self.enable_attention_slicing(None)
    @torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]],
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        latents_reference: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        text_embeddings: Optional[torch.FloatTensor] = None,
        **kwargs,
    ):
        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )
        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            return_tensors="pt",
        )
        text_input_ids = text_inputs.input_ids

        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}"
            )
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]

        if text_embeddings is None:
            text_embeddings = self.text_encoder(text_input_ids.to(self.device))[0]

        # duplicate text embeddings for each generation per prompt, using mps friendly method
        bs_embed, seq_len, _ = text_embeddings.shape
        text_embeddings = text_embeddings.repeat(1, num_images_per_prompt, 1)
        text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1)
        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0
        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [""]
            elif type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}."
                )
            elif isinstance(negative_prompt, str):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`."
                )
            else:
                uncond_tokens = negative_prompt

            max_length = text_input_ids.shape[-1]
            uncond_input = self.tokenizer(
                uncond_tokens,
                padding="max_length",
                max_length=max_length,
                truncation=True,
                return_tensors="pt",
            )
            uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = uncond_embeddings.shape[1]
            uncond_embeddings = uncond_embeddings.repeat(num_images_per_prompt, 1, 1)
            uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt, seq_len, -1)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            text_embeddings = torch.cat([uncond_embeddings, text_embeddings])
        # get the initial random noise unless the user supplied it
        # Unlike in other pipelines, latents need to be generated in the target device
        # for 1-to-1 results reproducibility with the CompVis implementation.
        # However this currently doesn't work in `mps`.
        latents_shape = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
        latents_shape_reference = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64)
        latents_dtype = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not exist on mps
                latents_reference = torch.randn(
                    latents_shape_reference, generator=generator, device="cpu", dtype=latents_dtype
                ).to(self.device)
                latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to(
                    self.device
                )
            else:
                latents_reference = torch.randn(
                    latents_shape_reference, generator=generator, device=self.device, dtype=latents_dtype
                )
                latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
        else:
            if latents_reference.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            latents_reference = latents_reference.to(self.device)
            latents = latents.to(self.device)

        # This is the key part of the pipeline where we
        # try to ensure that the generated images w/ the same seed
        # but different sizes actually result in similar images
        dx = (latents_shape[3] - latents_shape_reference[3]) // 2
        dy = (latents_shape[2] - latents_shape_reference[2]) // 2
        w = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx
        h = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy
        nx = 0 if dx < 0 else dx
        ny = 0 if dy < 0 else dy
        dx = max(-dx, 0)
        dy = max(-dy, 0)
        # import pdb
        # pdb.set_trace()
        latents[:, :, ny : ny + h, nx : nx + w] = latents_reference[:, :, dy : dy + h, dx : dx + w]
        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps)

        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        timesteps_tensor = self.scheduler.timesteps.to(self.device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta
        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            # predict the noise residual
            noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample

            # perform guidance
            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, t, latents)
        latents = 1 / 0.18215 * latents
        image = self.vae.decode(latents).sample

        image = (image / 2 + 0.5).clamp(0, 1)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if self.safety_checker is not None:
            safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors="pt").to(
                self.device
            )
            image, has_nsfw_concept = self.safety_checker(
                images=image, clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype)
            )
        else:
            has_nsfw_concept = None

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image, has_nsfw_concept)

        return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
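
# Illustrative usage (a sketch, not part of the original file; the model id and
# `custom_pipeline` name are assumptions):
#
#     pipe = DiffusionPipeline.from_pretrained(
#         "runwayml/stable-diffusion-v1-5", custom_pipeline="seed_resize_stable_diffusion"
#     ).to("cuda")
#     generator = torch.Generator(device="cuda").manual_seed(0)
#     image = pipe("a photo of an astronaut", height=768, width=768, generator=generator).images[0]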
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP = {"ctrl": "https://huggingface.co/ctrl/resolve/main/config.json"}


class CTRLConfig(PretrainedConfig):
    model_type = "ctrl"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=246534,
        n_positions=256,
        n_embd=1280,
        dff=8192,
        n_layer=48,
        n_head=16,
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        layer_norm_epsilon=1e-6,
        initializer_range=0.02,
        use_cache=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.dff = dff
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache

        super().__init__(**kwargs)
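
# Illustrative usage (a sketch, not part of the original file): a deliberately
# tiny, hypothetical configuration; `attribute_map` resolves the generic names.
def _sketch_ctrl_config():
    config = CTRLConfig(n_embd=64, n_layer=2, n_head=4)
    assert config.hidden_size == 64  # mapped to `n_embd`
    assert config.num_hidden_layers == 2  # mapped to `n_layer`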
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run

import doctest
import sys
import warnings
from os.path import abspath, dirname, join

import _pytest

from transformers.testing_utils import HfDoctestModule, HfDocTestParser


# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(__file__), "src"))
sys.path.insert(1, git_repo_path)

# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)


def pytest_configure(config):
    config.addinivalue_line(
        "markers", "is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested"
    )
    config.addinivalue_line(
        "markers", "is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested"
    )
    config.addinivalue_line("markers", "is_pipeline_test: mark test to run only when pipelines are tested")
    config.addinivalue_line("markers", "is_staging_test: mark test to run only in the staging environment")
    config.addinivalue_line("markers", "accelerate_tests: mark test that require accelerate")
    config.addinivalue_line("markers", "tool_tests: mark the tool tests that are run on their specific schedule")


def pytest_addoption(parser):
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)


def pytest_sessionfinish(session, exitstatus):
    # pytest exits with code 5 when no tests were collected; treat that as success.
    if exitstatus == 5:
        session.exitstatus = 0


# Doctest custom flag to ignore output.
IGNORE_RESULT = doctest.register_optionflag("IGNORE_RESULT")

OutputChecker = doctest.OutputChecker


class CustomOutputChecker(OutputChecker):
    def check_output(self, want, got, optionflags):
        if IGNORE_RESULT & optionflags:
            return True
        return OutputChecker.check_output(self, want, got, optionflags)


doctest.OutputChecker = CustomOutputChecker
_pytest.doctest.DoctestModule = HfDoctestModule
doctest.DocTestParser = HfDocTestParser
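
# Illustrative sketch (not part of the original conftest): how the custom
# `IGNORE_RESULT` doctest flag is meant to be used. With the flag set, the
# patched checker accepts any output for that example:
#
#     >>> import random
#     >>> random.random()  # doctest: +IGNORE_RESULT
#     0.123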
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


logger = logging.get_logger(__name__)

RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/resnet-50": "https://huggingface.co/microsoft/resnet-50/blob/main/config.json",
}


class ResNetConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "resnet"
    layer_types = ["basic", "bottleneck"]

    def __init__(
        self,
        num_channels=3,
        embedding_size=64,
        hidden_sizes=[256, 512, 1024, 2048],
        depths=[3, 4, 6, 3],
        layer_type="bottleneck",
        hidden_act="relu",
        downsample_in_first_stage=False,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if layer_type not in self.layer_types:
            raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types)}")
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.downsample_in_first_stage = downsample_in_first_stage
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )


class ResNetOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-3
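
# Illustrative usage (a sketch, not part of the original file): requesting
# backbone features by stage name aligns the numeric indices automatically.
# Note: `out_indices` may come back as a list or tuple depending on the version.
def _sketch_resnet_config():
    config = ResNetConfig(out_features=["stage2", "stage4"])
    assert config.stage_names == ["stem", "stage1", "stage2", "stage3", "stage4"]
    assert tuple(config.out_indices) == (2, 4)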
def remove_digit(num: int) -> int:
    """Return the biggest number that can be obtained by removing exactly one digit."""
    if not isinstance(num, int):
        raise TypeError("only integers accepted as input")
    else:
        num_str = str(abs(num))
        num_transpositions = [list(num_str) for char in range(len(num_str))]
        for index in range(len(num_str)):
            num_transpositions[index].pop(index)
        return max(
            int("".join(list(transposition))) for transposition in num_transpositions
        )


if __name__ == "__main__":
    __import__("doctest").testmod()
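
# Illustrative check (a sketch, not from the original file): dropping the "1"
# from 152 maximizes what remains; the sign is discarded via `abs()`.
def _sketch_remove_digit():
    assert remove_digit(152) == 52
    assert remove_digit(-90) == 9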
import os
import pickle
import unittest
from transformers import AutoTokenizer
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.models.bert_japanese.tokenization_bert_japanese import (
VOCAB_FILES_NAMES,
BertJapaneseTokenizer,
CharacterTokenizer,
JumanppTokenizer,
MecabTokenizer,
SudachiTokenizer,
WordpieceTokenizer,
)
from transformers.testing_utils import custom_tokenizers, require_jumanpp, require_sudachi
from ...test_tokenization_common import TokenizerTesterMixin
@custom_tokenizers
class BertJapaneseTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BertJapaneseTokenizer
    test_rust_tokenizer = False
    space_between_special_tokens = True

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "こんにちは",
            "こん",
            "にちは",
            "ばんは",
            "##こん",
            "##にちは",
            "##ばんは",
            "世界",
            "##世界",
            "、",
            "##、",
            "。",
            "##。",
        ]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_input_output_texts(self, tokenizer):
        input_text = "こんにちは、世界。 \nこんばんは、世界。"
        output_text = "こんにちは 、 世界 。 こんばんは 、 世界 。"
        return input_text, output_text

    def get_clean_sequence(self, tokenizer):
        input_text, output_text = self.get_input_output_texts(tokenizer)
        ids = tokenizer.encode(output_text, add_special_tokens=False)
        text = tokenizer.decode(ids, clean_up_tokenization_spaces=False)
        return text, ids
    def test_pretokenized_inputs(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_pair_input(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_single_input(self):
        pass  # TODO add if relevant

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("こんにちは、世界。\nこんばんは、世界。")
        self.assertListEqual(tokens, ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [3, 12, 10, 14, 4, 9, 12, 10, 14])
    def test_pickle_mecab_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, word_tokenizer_type="mecab")
        self.assertIsNotNone(tokenizer)

        text = "こんにちは、世界。\nこんばんは、世界。"
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [3, 12, 10, 14, 4, 9, 12, 10, 14])

        filename = os.path.join(self.tmpdirname, "tokenizer.bin")
        with open(filename, "wb") as handle:
            pickle.dump(tokenizer, handle)
        with open(filename, "rb") as handle:
            tokenizer_new = pickle.load(handle)

        tokens_loaded = tokenizer_new.tokenize(text)
        self.assertListEqual(tokens, tokens_loaded)

    def test_mecab_tokenizer(self):
        tokenizer = MecabTokenizer(mecab_dic="ipadic")

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  "),
            ["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"],
        )
    def test_mecab_tokenizer_unidic_lite(self):
        try:
            tokenizer = MecabTokenizer(mecab_dic="unidic_lite")
        except ModuleNotFoundError:
            return

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  "),
            ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"],
        )

    def test_mecab_tokenizer_unidic(self):
        try:
            tokenizer = MecabTokenizer(mecab_dic="unidic")
        except ModuleNotFoundError:
            return

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  "),
            ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"],
        )

    def test_mecab_tokenizer_lower(self):
        tokenizer = MecabTokenizer(do_lower_case=True, mecab_dic="ipadic")

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  "),
            ["アップルストア", "で", "iphone", "8", "が", "発売", "さ", "れ", "た", "。"],
        )

    def test_mecab_tokenizer_with_option(self):
        try:
            tokenizer = MecabTokenizer(
                do_lower_case=True, normalize_text=False, mecab_option="-d /usr/local/lib/mecab/dic/jumandic"
            )
        except RuntimeError:
            # if dict doesn't exist in the system, previous code raises this error.
            return

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  "),
            ["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れた", "\u3000", "。"],
        )

    def test_mecab_tokenizer_no_normalize(self):
        tokenizer = MecabTokenizer(normalize_text=False, mecab_dic="ipadic")

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  "),
            ["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", " ", "。"],
        )
    @require_sudachi
    def test_pickle_sudachi_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, word_tokenizer_type="sudachi")
        self.assertIsNotNone(tokenizer)

        text = "こんにちは、世界。\nこんばんは、世界。"
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [3, 12, 10, 14, 4, 9, 12, 10, 14])

        filename = os.path.join(self.tmpdirname, "tokenizer.bin")
        with open(filename, "wb") as handle:
            pickle.dump(tokenizer, handle)
        with open(filename, "rb") as handle:
            tokenizer_new = pickle.load(handle)

        tokens_loaded = tokenizer_new.tokenize(text)
        self.assertListEqual(tokens, tokens_loaded)

    @require_sudachi
    def test_sudachi_tokenizer_core(self):
        tokenizer = SudachiTokenizer(sudachi_dict_type="core")

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  "),
            [" ", "\t", "アップル", "ストア", "で", "iPhone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", " ", "。", " ", " "],
        )

    @require_sudachi
    def test_sudachi_tokenizer_split_mode_A(self):
        tokenizer = SudachiTokenizer(sudachi_dict_type="core", sudachi_split_mode="A")

        self.assertListEqual(tokenizer.tokenize("外国人参政権"), ["外国", "人", "参政", "権"])

    @require_sudachi
    def test_sudachi_tokenizer_split_mode_B(self):
        tokenizer = SudachiTokenizer(sudachi_dict_type="core", sudachi_split_mode="B")

        self.assertListEqual(tokenizer.tokenize("外国人参政権"), ["外国人", "参政権"])

    @require_sudachi
    def test_sudachi_tokenizer_split_mode_C(self):
        tokenizer = SudachiTokenizer(sudachi_dict_type="core", sudachi_split_mode="C")

        self.assertListEqual(tokenizer.tokenize("外国人参政権"), ["外国人参政権"])

    @require_sudachi
    def test_sudachi_tokenizer_lower(self):
        tokenizer = SudachiTokenizer(do_lower_case=True, sudachi_dict_type="core")

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  "),
            [" ", "\t", "アップル", "ストア", "で", "iphone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", " ", "。", " ", " "],
        )

    @require_sudachi
    def test_sudachi_tokenizer_no_normalize(self):
        tokenizer = SudachiTokenizer(normalize_text=False, sudachi_dict_type="core")

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  "),
            [" ", "\t", "アップル", "ストア", "で", "iPhone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", "\u3000", "。", " ", " "],
        )

    @require_sudachi
    def test_sudachi_tokenizer_trim_whitespace(self):
        tokenizer = SudachiTokenizer(trim_whitespace=True, sudachi_dict_type="core")

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  "),
            ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"],
        )
    @require_jumanpp
    def test_pickle_jumanpp_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, word_tokenizer_type="jumanpp")
        self.assertIsNotNone(tokenizer)

        text = "こんにちは、世界。\nこんばんは、世界。"
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [3, 12, 10, 14, 4, 9, 12, 10, 14])

        filename = os.path.join(self.tmpdirname, "tokenizer.bin")
        with open(filename, "wb") as handle:
            pickle.dump(tokenizer, handle)
        with open(filename, "rb") as handle:
            tokenizer_new = pickle.load(handle)

        tokens_loaded = tokenizer_new.tokenize(text)
        self.assertListEqual(tokens, tokens_loaded)

    @require_jumanpp
    def test_jumanpp_tokenizer(self):
        tokenizer = JumanppTokenizer()

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  "),
            ["アップル", "ストア", "で", "iPhone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"],
        )

    @require_jumanpp
    def test_jumanpp_tokenizer_lower(self):
        tokenizer = JumanppTokenizer(do_lower_case=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  "),
            ["アップル", "ストア", "で", "iphone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"],
        )

    @require_jumanpp
    def test_jumanpp_tokenizer_no_normalize(self):
        tokenizer = JumanppTokenizer(normalize_text=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  "),
            ["ア", "ッ", "フ", "゚", "ル", "ストア", "で", "iPhone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"],
        )

    @require_jumanpp
    def test_jumanpp_tokenizer_trim_whitespace(self):
        tokenizer = JumanppTokenizer(trim_whitespace=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  "),
            ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れた", "。"],
        )

    @require_jumanpp
    def test_jumanpp_tokenizer_ext(self):
        tokenizer = JumanppTokenizer()

        self.assertListEqual(
            tokenizer.tokenize("ありがとうございますm(_ _)m見つけるのが大変です。"),
            ["ありがとう", "ございます", "m(_ _)m", "見つける", "の", "が", "大変です", "。"],
        )
    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "こんにちは", "こん", "にちは", "ばんは", "##こん", "##にちは", "##ばんは"]

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")

        self.assertListEqual(tokenizer.tokenize(""), [])

        self.assertListEqual(tokenizer.tokenize("こんにちは"), ["こんにちは"])

        self.assertListEqual(tokenizer.tokenize("こんばんは"), ["こん", "##ばんは"])

        self.assertListEqual(tokenizer.tokenize("こんばんは こんばんにちは こんにちは"), ["こん", "##ばんは", "[UNK]", "こんにちは"])

    def test_sentencepiece_tokenizer(self):
        tokenizer = BertJapaneseTokenizer.from_pretrained("nlp-waseda/roberta-base-japanese-with-auto-jumanpp")
        subword_tokenizer = tokenizer.subword_tokenizer

        tokens = subword_tokenizer.tokenize("国境 の 長い トンネル を 抜ける と 雪国 であった 。")
        self.assertListEqual(tokens, ["▁国境", "▁の", "▁長い", "▁トンネル", "▁を", "▁抜ける", "▁と", "▁雪", "国", "▁であった", "▁。"])

        tokens = subword_tokenizer.tokenize("こんばんは こんばん にち は こんにちは")
        self.assertListEqual(tokens, ["▁こん", "ばん", "は", "▁こん", "ばん", "▁に", "ち", "▁は", "▁こんにちは"])

    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("cl-tohoku/bert-base-japanese")

        text = tokenizer.encode("ありがとう。", add_special_tokens=False)
        text_2 = tokenizer.encode("どういたしまして。", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        # 2 is for "[CLS]", 3 is for "[SEP]"
        assert encoded_sentence == [2] + text + [3]
        assert encoded_pair == [2] + text + [3] + text_2 + [3]
@custom_tokenizers
class BertJapaneseCharacterTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BertJapaneseTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "こ", "ん", "に", "ち", "は", "ば", "世", "界", "、", "。"]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        return BertJapaneseTokenizer.from_pretrained(self.tmpdirname, subword_tokenizer_type="character", **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "こんにちは、世界。 \nこんばんは、世界。"
        output_text = "こ ん に ち は 、 世 界 。 こ ん ば ん は 、 世 界 。"
        return input_text, output_text

    def test_pretokenized_inputs(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_pair_input(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_single_input(self):
        pass  # TODO add if relevant

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, subword_tokenizer_type="character")

        tokens = tokenizer.tokenize("こんにちは、世界。 \nこんばんは、世界。")
        self.assertListEqual(
            tokens, ["こ", "ん", "に", "ち", "は", "、", "世", "界", "。", "こ", "ん", "ば", "ん", "は", "、", "世", "界", "。"]
        )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [3, 4, 5, 6, 7, 11, 9, 10, 12, 3, 4, 8, 4, 7, 11, 9, 10, 12]
        )

    def test_character_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "こ", "ん", "に", "ち", "は", "ば", "世", "界", "、", "。"]

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = CharacterTokenizer(vocab=vocab, unk_token="[UNK]")

        self.assertListEqual(tokenizer.tokenize(""), [])

        self.assertListEqual(tokenizer.tokenize("こんにちは"), ["こ", "ん", "に", "ち", "は"])

        self.assertListEqual(tokenizer.tokenize("こんにちほ"), ["こ", "ん", "に", "ち", "[UNK]"])

    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("cl-tohoku/bert-base-japanese-char")

        text = tokenizer.encode("ありがとう。", add_special_tokens=False)
        text_2 = tokenizer.encode("どういたしまして。", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        # 2 is for "[CLS]", 3 is for "[SEP]"
        assert encoded_sentence == [2] + text + [3]
        assert encoded_pair == [2] + text + [3] + text_2 + [3]
@custom_tokenizers
class AutoTokenizerCustomTest(unittest.TestCase):
    def test_tokenizer_bert_japanese(self):
        EXAMPLE_BERT_JAPANESE_ID = "cl-tohoku/bert-base-japanese"
        tokenizer = AutoTokenizer.from_pretrained(EXAMPLE_BERT_JAPANESE_ID)
        self.assertIsInstance(tokenizer, BertJapaneseTokenizer)


class BertTokenizerMismatchTest(unittest.TestCase):
    def test_tokenizer_mismatch_warning(self):
        EXAMPLE_BERT_JAPANESE_ID = "cl-tohoku/bert-base-japanese"
        with self.assertLogs("transformers", level="WARNING") as cm:
            BertTokenizer.from_pretrained(EXAMPLE_BERT_JAPANESE_ID)
        self.assertTrue(
            cm.records[0].message.startswith(
                "The tokenizer class you load from this checkpoint is not the same type as the class this function"
                " is called from."
            )
        )

        EXAMPLE_BERT_ID = "bert-base-cased"
        with self.assertLogs("transformers", level="WARNING") as cm:
            BertJapaneseTokenizer.from_pretrained(EXAMPLE_BERT_ID)
        self.assertTrue(
            cm.records[0].message.startswith(
                "The tokenizer class you load from this checkpoint is not the same type as the class this function"
                " is called from."
            )
        )
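
# Illustrative sketch (not part of the original tests): the greedy
# longest-match-first idea behind WordpieceTokenizer, shown on a toy English
# vocabulary (the vocab and word are hypothetical).
def _sketch_wordpiece_idea(word="unaffable"):
    vocab = {"un", "##aff", "##able"}
    tokens, start = [], 0
    while start < len(word):
        end, cur = len(word), None
        while start < end:
            piece = word[start:end]
            if start > 0:
                piece = "##" + piece  # continuation pieces carry the "##" prefix
            if piece in vocab:
                cur = piece
                break
            end -= 1  # shrink the candidate until it matches the vocab
        if cur is None:
            return ["[UNK]"]  # no piece matched: the whole word is unknown
        tokens.append(cur)
        start = end
    return tokens  # e.g. ['un', '##aff', '##able']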