"""simple docstring"""
import os
import unittest
from transformers import FunnelTokenizer, FunnelTokenizerFast
from transformers.models.funnel.tokenization_funnel import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class snake_case ( __UpperCAmelCase , unittest.TestCase ):
'''simple docstring'''
_A : int = FunnelTokenizer
_A : Optional[Any] = FunnelTokenizerFast
_A : Optional[int] = True
_A : List[str] = True
def A_ ( self : List[str] ):
'''simple docstring'''
super().setUp()
__UpperCAmelCase : int = [
'''<unk>''',
'''<cls>''',
'''<sep>''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
__UpperCAmelCase : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
def A_ ( self : Union[str, Any] , **__lowercase : Optional[int] ):
'''simple docstring'''
return FunnelTokenizer.from_pretrained(self.tmpdirname , **__lowercase )
def A_ ( self : Any , **__lowercase : Tuple ):
'''simple docstring'''
return FunnelTokenizerFast.from_pretrained(self.tmpdirname , **__lowercase )
def A_ ( self : Dict , __lowercase : Any ):
'''simple docstring'''
__UpperCAmelCase : int = '''UNwant\u00E9d,running'''
__UpperCAmelCase : Tuple = '''unwanted, running'''
return input_text, output_text
def A_ ( self : Dict ):
'''simple docstring'''
__UpperCAmelCase : int = self.tokenizer_class(self.vocab_file )
__UpperCAmelCase : int = tokenizer.tokenize('''UNwant\u00E9d,running''' )
self.assertListEqual(__lowercase , ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowercase ) , [7, 4, 5, 10, 8, 9] )
def A_ ( self : Optional[Any] ):
'''simple docstring'''
__UpperCAmelCase : Optional[Any] = self.get_tokenizers(do_lower_case=__lowercase )
for tokenizer in tokenizers:
__UpperCAmelCase : Dict = tokenizer('''UNwant\u00E9d,running''' )
__UpperCAmelCase : Optional[Any] = len(inputs['''input_ids'''] ) - 1
self.assertListEqual(inputs['''token_type_ids'''] , [2] + [0] * sentence_len )
__UpperCAmelCase : List[Any] = tokenizer('''UNwant\u00E9d,running''' , '''UNwant\u00E9d,running''' )
self.assertListEqual(inputs['''token_type_ids'''] , [2] + [0] * sentence_len + [1] * sentence_len )
"""simple docstring"""
import functools
import gc
import inspect
import torch
from .imports import is_npu_available, is_xpu_available
def lowerCamelCase_ ( *UpperCAmelCase_ ) ->Optional[int]:
"""simple docstring"""
if not isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
__UpperCAmelCase : List[Any] = list(UpperCAmelCase_ )
for i in range(len(UpperCAmelCase_ ) ):
__UpperCAmelCase : Optional[Any] = None
gc.collect()
if is_xpu_available():
torch.xpu.empty_cache()
elif is_npu_available():
torch.npu.empty_cache()
else:
torch.cuda.empty_cache()
return objects
def lowerCamelCase_ ( UpperCAmelCase_ ) ->bool:
"""simple docstring"""
__UpperCAmelCase : Optional[int] = [
'''CUDA out of memory.''', # CUDA OOM
'''cuDNN error: CUDNN_STATUS_NOT_SUPPORTED.''', # CUDNN SNAFU
'''DefaultCPUAllocator: can\'t allocate memory''', # CPU OOM
]
if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) and len(exception.args ) == 1:
return any(err in exception.args[0] for err in _statements )
return False
def lowerCamelCase_ ( UpperCAmelCase_ = None , UpperCAmelCase_ = 1_28 ) ->str:
"""simple docstring"""
if function is None:
return functools.partial(UpperCAmelCase_ , starting_batch_size=UpperCAmelCase_ )
__UpperCAmelCase : List[str] = starting_batch_size
def decorator(*UpperCAmelCase_ , **UpperCAmelCase_ ):
nonlocal batch_size
gc.collect()
if is_xpu_available():
torch.xpu.empty_cache()
elif is_npu_available():
torch.npu.empty_cache()
else:
torch.cuda.empty_cache()
__UpperCAmelCase : Optional[int] = list(inspect.signature(UpperCAmelCase_ ).parameters.keys() )
# Guard against user error
if len(UpperCAmelCase_ ) < (len(UpperCAmelCase_ ) + 1):
__UpperCAmelCase : Dict = ''', '''.join([f'''{arg}={value}''' for arg, value in zip(params[1:] , args[1:] )] )
raise TypeError(
f'''Batch size was passed into `{function.__name__}` as the first argument when called.'''
f'''Remove this as the decorator already does so: `{function.__name__}({arg_str})`''' )
while True:
if batch_size == 0:
raise RuntimeError('''No executable batch size found, reached zero.''' )
try:
return function(UpperCAmelCase_ , *UpperCAmelCase_ , **UpperCAmelCase_ )
except Exception as e:
if should_reduce_batch_size(UpperCAmelCase_ ):
gc.collect()
if is_xpu_available():
torch.xpu.empty_cache()
elif is_npu_available():
torch.npu.empty_cache()
else:
torch.cuda.empty_cache()
batch_size //= 2
else:
raise
return decorator
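
# A minimal usage sketch for the decorator above (the training function and its
# arguments are hypothetical): the decorated function must take the batch size
# as its first parameter, which the decorator injects and halves on OOM.
#
#   @find_executable_batch_size(starting_batch_size=128)
#   def train(batch_size, model, dataloader):
#       ...  # runs at 128 first, then 64, 32, ... until it fits in memory
#
#   train(model, dataloader)  # note: the caller does not pass a batch size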
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

SPEECH_TO_TEXT_2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/s2t-wav2vec2-large-en-de": (
        "https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json"
    ),
    # See all Speech2Text models at https://huggingface.co/models?filter=speech2text2
}


class Speech2Text2Config(PretrainedConfig):
    model_type = "speech_to_text_2"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "decoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=10_000,
        decoder_layers=6,
        decoder_ffn_dim=2_048,
        decoder_attention_heads=4,
        decoder_layerdrop=0.0,
        use_cache=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        initializer_range=0.02,
        decoder_start_token_id=2,
        scale_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        max_target_positions=1_024,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = initializer_range
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = decoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_target_positions = max_target_positions

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
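
# A small usage sketch: the attribute_map above aliases the generic config
# names onto the decoder-specific ones, so hidden_size reads through to d_model.
#
#   from transformers import Speech2Text2Config
#   config = Speech2Text2Config(d_model=128, decoder_layers=2)
#   assert config.hidden_size == config.d_model == 128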
import qiskit


def half_adder(bit0: int, bit1: int) -> qiskit.result.counts.Counts:
    """Build a quantum half adder for two classical input bits and simulate it."""
    simulator = qiskit.Aer.get_backend("aer_simulator")

    qc_ha = qiskit.QuantumCircuit(4, 2)
    # encode inputs in qubits 0 and 1
    if bit0 == 1:
        qc_ha.x(0)
    if bit1 == 1:
        qc_ha.x(1)
    qc_ha.barrier()

    # use cnots to write XOR of the inputs on qubit2
    qc_ha.cx(0, 2)
    qc_ha.cx(1, 2)

    # use ccx / toffoli gate to write AND of the inputs on qubit3
    qc_ha.ccx(0, 1, 3)
    qc_ha.barrier()

    # extract outputs
    qc_ha.measure(2, 0)  # extract XOR value
    qc_ha.measure(3, 1)  # extract AND value

    # Execute the circuit on the qasm simulator
    job = qiskit.execute(qc_ha, simulator, shots=1_000)

    # Return the histogram data of the results of the experiment
    return job.result().get_counts(qc_ha)


if __name__ == "__main__":
    counts = half_adder(1, 1)
    print(f"Half Adder Output Qubit Counts: {counts}")
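
# A quick truth-table check of the circuit above (assumes a local qiskit + Aer
# install). Counts keys read as "<carry><sum>", so on the noiseless simulator
# half_adder(1, 1) should yield {'10': 1000}: sum = 1 XOR 1 = 0, carry = 1 AND 1 = 1.
#
#   for a, b in [(0, 0), (0, 1), (1, 0), (1, 1)]:
#       print(a, b, half_adder(a, b))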
import os
from typing import List, Optional, Union

from ...tokenization_utils import PreTrainedTokenizer
from ...tokenization_utils_base import AddedToken
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/esm2_t6_8M_UR50D": "https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt",
        "facebook/esm2_t12_35M_UR50D": "https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/esm2_t6_8M_UR50D": 1024,
    "facebook/esm2_t12_35M_UR50D": 1024,
}


def load_vocab_file(vocab_file):
    with open(vocab_file, "r") as f:
        lines = f.read().splitlines()
        return [l.strip() for l in lines]


class EsmTokenizer(PreTrainedTokenizer):
    """Constructs an ESM tokenizer."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        unk_token="<unk>",
        cls_token="<cls>",
        pad_token="<pad>",
        mask_token="<mask>",
        eos_token="<eos>",
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.all_tokens = load_vocab_file(vocab_file)
        self._id_to_token = dict(enumerate(self.all_tokens))
        self._token_to_id = {tok: ind for ind, tok in enumerate(self.all_tokens)}
        self.unk_token = unk_token
        self.cls_token = cls_token
        self.pad_token = pad_token
        self.mask_token = mask_token
        self.eos_token = eos_token
        self.unique_no_split_tokens = self.all_tokens
        self._create_trie(self.unique_no_split_tokens)

    def _convert_id_to_token(self, index: int) -> str:
        return self._id_to_token.get(index, self.unk_token)

    def _convert_token_to_id(self, token: str) -> int:
        return self._token_to_id.get(token, self._token_to_id.get(self.unk_token))

    def _tokenize(self, text, **kwargs) -> List[str]:
        return text.split()

    def get_vocab_size(self, with_added_tokens=False) -> int:
        return len(self._id_to_token)

    def get_vocab(self):
        return {token: i for i, token in enumerate(self.all_tokens)}

    def token_to_id(self, token: str) -> int:
        return self._token_to_id.get(token, self._token_to_id.get(self.unk_token))

    def id_to_token(self, index: int) -> str:
        return self._id_to_token.get(index, self.unk_token)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        cls = [self.cls_token_id]
        sep = [self.eos_token_id]  # No sep token in ESM vocabulary
        if token_ids_1 is None:
            if self.eos_token_id is None:
                return cls + token_ids_0
            else:
                return cls + token_ids_0 + sep
        elif self.eos_token_id is None:
            raise ValueError("Cannot tokenize multiple sequences when EOS token is not set!")
        return cls + token_ids_0 + sep + token_ids_1 + sep  # Multiple inputs always have an EOS token

    def get_special_tokens_mask(
        self, token_ids_0: List, token_ids_1: Optional[List] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if token in self.all_special_ids else 0 for token in token_ids_0]
        mask = [1] + ([0] * len(token_ids_0)) + [1]
        if token_ids_1 is not None:
            mask += [0] * len(token_ids_1) + [1]
        return mask

    def save_vocabulary(self, save_directory, filename_prefix):
        vocab_file = os.path.join(save_directory, (filename_prefix + "-" if filename_prefix else "") + "vocab.txt")
        with open(vocab_file, "w") as f:
            f.write("\n".join(self.all_tokens))
        return (vocab_file,)

    @property
    def vocab_size(self) -> int:
        return self.get_vocab_size(with_added_tokens=False)

    def _add_tokens(self, new_tokens: Union[List[str], List[AddedToken]], special_tokens: bool = False) -> int:
        return super()._add_tokens(new_tokens, special_tokens=True)
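
# A hedged usage sketch (requires network access to the Hugging Face Hub):
# every amino-acid letter in the vocabulary is a no-split token, so a raw
# protein string is tokenized one residue at a time.
#
#   from transformers import EsmTokenizer
#   tokenizer = EsmTokenizer.from_pretrained("facebook/esm2_t6_8M_UR50D")
#   tokenizer.tokenize("MKTA")  # -> ["M", "K", "T", "A"]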
import numpy as np


def exponential_linear_unit(vector: np.ndarray, alpha: float) -> np.ndarray:
    """Apply the ELU activation: identity for positive inputs, alpha * (exp(x) - 1) otherwise."""
    return np.where(vector > 0, vector, (alpha * (np.exp(vector) - 1)))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
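
# A small sanity check for the function above: positive inputs pass through
# unchanged, negative inputs are squashed toward -alpha.
#
#   exponential_linear_unit(np.array([1.0, -1.0]), alpha=1.0)
#   # -> approximately [1.0, -0.632], since exp(-1) - 1 ≈ -0.632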
"""simple docstring"""
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class A__ :
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE="resnet50" , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , ):
__lowerCAmelCase : List[str] = parent
__lowerCAmelCase : Union[str, Any] = out_indices if out_indices is not None else [4]
__lowerCAmelCase : Optional[int] = stage_names
__lowerCAmelCase : Dict = out_features
__lowerCAmelCase : Optional[Any] = backbone
__lowerCAmelCase : Union[str, Any] = batch_size
__lowerCAmelCase : List[str] = image_size
__lowerCAmelCase : List[str] = num_channels
__lowerCAmelCase : int = use_pretrained_backbone
__lowerCAmelCase : Dict = is_training
def __lowerCamelCase ( self ):
__lowerCAmelCase : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowerCAmelCase : Optional[int] = self.get_config()
return config, pixel_values
def __lowerCamelCase ( self ):
return TimmBackboneConfig(
image_size=self.image_size , num_channels=self.num_channels , out_features=self.out_features , out_indices=self.out_indices , stage_names=self.stage_names , use_pretrained_backbone=self.use_pretrained_backbone , backbone=self.backbone , )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : str = TimmBackbone(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
with torch.no_grad():
__lowerCAmelCase : Optional[int] = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(
result.feature_map[-1].shape , (self.batch_size, model.channels[-1], 14, 14) , )
def __lowerCamelCase ( self ):
__lowerCAmelCase : int = self.prepare_config_and_inputs()
__lowerCAmelCase , __lowerCAmelCase : Optional[Any] = config_and_inputs
__lowerCAmelCase : Union[str, Any] = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
@require_timm
class A__ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , unittest.TestCase):
A_ : List[str] = (TimmBackbone,) if is_torch_available() else ()
A_ : List[Any] = {'feature-extraction': TimmBackbone} if is_torch_available() else {}
A_ : Tuple = False
A_ : List[str] = False
A_ : int = False
A_ : List[Any] = False
def __lowerCamelCase ( self ):
__lowerCAmelCase : int = TimmBackboneModelTester(self )
__lowerCAmelCase : Optional[int] = ConfigTester(self , config_class=_SCREAMING_SNAKE_CASE , has_text_modality=_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __lowerCamelCase ( self ):
__lowerCAmelCase : int = 'resnet18'
__lowerCAmelCase : Optional[Any] = 'microsoft/resnet-18'
__lowerCAmelCase : Optional[int] = AutoBackbone.from_pretrained(_SCREAMING_SNAKE_CASE , use_timm_backbone=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Dict = AutoBackbone.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(len(timm_model.stage_names ) , len(transformers_model.stage_names ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
# Out indices are set to the last layer by default. For timm models, we don't know
# the number of layers in advance, so we set it to (-1,), whereas for transformers
# models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
self.assertEqual(timm_model.out_indices , (-1,) )
self.assertEqual(transformers_model.out_indices , [len(timm_model.stage_names ) - 1] )
__lowerCAmelCase : Dict = AutoBackbone.from_pretrained(_SCREAMING_SNAKE_CASE , use_timm_backbone=_SCREAMING_SNAKE_CASE , out_indices=[1, 2, 3] )
__lowerCAmelCase : Optional[Any] = AutoBackbone.from_pretrained(_SCREAMING_SNAKE_CASE , out_indices=[1, 2, 3] )
self.assertEqual(timm_model.out_indices , transformers_model.out_indices )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
@unittest.skip('TimmBackbone doesn\'t support feed forward chunking' )
def __lowerCamelCase ( self ):
pass
@unittest.skip('TimmBackbone doesn\'t have num_hidden_layers attribute' )
def __lowerCamelCase ( self ):
pass
@unittest.skip('TimmBackbone initialization is managed on the timm side' )
def __lowerCamelCase ( self ):
pass
@unittest.skip('TimmBackbone models doesn\'t have inputs_embeds' )
def __lowerCamelCase ( self ):
pass
@unittest.skip('TimmBackbone models doesn\'t have inputs_embeds' )
def __lowerCamelCase ( self ):
pass
@unittest.skip('TimmBackbone model cannot be created without specifying a backbone checkpoint' )
def __lowerCamelCase ( self ):
pass
@unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone' )
def __lowerCamelCase ( self ):
pass
@unittest.skip('model weights aren\'t tied in TimmBackbone.' )
def __lowerCamelCase ( self ):
pass
@unittest.skip('model weights aren\'t tied in TimmBackbone.' )
def __lowerCamelCase ( self ):
pass
@unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone' )
def __lowerCamelCase ( self ):
pass
@unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone' )
def __lowerCamelCase ( self ):
pass
@unittest.skip('TimmBackbone doesn\'t have hidden size info in its configuration.' )
def __lowerCamelCase ( self ):
pass
@unittest.skip('TimmBackbone doesn\'t support output_attentions.' )
def __lowerCamelCase ( self ):
pass
@unittest.skip('Safetensors is not supported by timm.' )
def __lowerCamelCase ( self ):
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def __lowerCamelCase ( self ):
pass
def __lowerCamelCase ( self ):
__lowerCAmelCase , __lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCAmelCase : int = model_class(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Tuple = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowerCAmelCase : Optional[Any] = [*signature.parameters.keys()]
__lowerCAmelCase : Any = ['pixel_values']
self.assertListEqual(arg_names[:1] , _SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
__lowerCAmelCase , __lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCAmelCase : Dict = True
__lowerCAmelCase : Optional[int] = self.has_attentions
# no need to test all models as different heads yield the same functionality
__lowerCAmelCase : Union[str, Any] = self.all_model_classes[0]
__lowerCAmelCase : Union[str, Any] = model_class(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : int = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : int = model(**_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Any = outputs[0][-1]
# Encoder-/Decoder-only models
__lowerCAmelCase : Optional[int] = outputs.hidden_states[0]
hidden_states.retain_grad()
if self.has_attentions:
__lowerCAmelCase : Dict = outputs.attentions[0]
attentions.retain_grad()
output.flatten()[0].backward(retain_graph=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(hidden_states.grad )
if self.has_attentions:
self.assertIsNotNone(attentions.grad )
def __lowerCamelCase ( self ):
__lowerCAmelCase , __lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCAmelCase : Tuple = model_class(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
__lowerCAmelCase : List[Any] = model(**_SCREAMING_SNAKE_CASE )
self.assertEqual(len(result.feature_maps ) , len(config.out_indices ) )
self.assertEqual(len(model.channels ) , len(config.out_indices ) )
# Check output of last stage is taken if out_features=None, out_indices=None
__lowerCAmelCase : List[Any] = copy.deepcopy(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[str] = None
__lowerCAmelCase : List[str] = model_class(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
__lowerCAmelCase : Optional[int] = model(**_SCREAMING_SNAKE_CASE )
self.assertEqual(len(result.feature_maps ) , 1 )
self.assertEqual(len(model.channels ) , 1 )
# Check backbone can be initialized with fresh weights
__lowerCAmelCase : List[str] = copy.deepcopy(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[Any] = False
__lowerCAmelCase : str = model_class(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
__lowerCAmelCase : Dict = model(**_SCREAMING_SNAKE_CASE )
"""simple docstring"""
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase = False ):
if not isinstance(_UpperCamelCase , _UpperCamelCase ):
__lowerCAmelCase : str = F"Expected string as input, found {type(_UpperCamelCase )}"
raise ValueError(_UpperCamelCase )
if not isinstance(_UpperCamelCase , _UpperCamelCase ):
__lowerCAmelCase : Union[str, Any] = F"Expected boolean as use_pascal parameter, found {type(_UpperCamelCase )}"
raise ValueError(_UpperCamelCase )
__lowerCAmelCase : Tuple = input_str.split('_' )
__lowerCAmelCase : int = 0 if use_pascal else 1
__lowerCAmelCase : Any = words[start_index:]
__lowerCAmelCase : Any = [word[0].upper() + word[1:] for word in words_to_capitalize]
__lowerCAmelCase : Any = '' if use_pascal else words[0]
return "".join([initial_word, *capitalized_words] )
if __name__ == "__main__":
from doctest import testmod
testmod()
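
# Usage sketch for the converter above:
#
#   snake_to_camel_case("some_random_string")                   # -> "someRandomString"
#   snake_to_camel_case("some_random_string", use_pascal=True)  # -> "SomeRandomString"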
"""simple docstring"""
def _UpperCamelCase ( A = 1_000_000 ):
UpperCamelCase_ =limit + 1
UpperCamelCase_ =[0] * limit
for first_term in range(1 , A ):
for n in range(A , A , A ):
UpperCamelCase_ =first_term + n / first_term
if common_difference % 4: # d must be divisble by 4
continue
else:
common_difference /= 4
if (
first_term > common_difference
and first_term < 4 * common_difference
): # since x,y,z are positive integers
frequency[n] += 1 # so z>0 and a>d ,also 4d<a
UpperCamelCase_ =sum(1 for x in frequency[1:limit] if x == 10 )
return count
if __name__ == "__main__":
print(f'''{solution() = }''')
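
# Why the loop works: write the arithmetic progression as x = a + d, y = a,
# z = a - d with a = first_term. Then
#     x^2 - y^2 - z^2 = (a + d)^2 - a^2 - (a - d)^2 = a * (4d - a) = n,
# so n must be a multiple of a, and d = (a + n / a) / 4 must be an integer --
# exactly the common_difference % 4 test above. The bounds a > d and a < 4d
# keep z and n positive.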
"""simple docstring"""
import os
import pickle
import unittest
from transformers import AutoTokenizer
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.models.bert_japanese.tokenization_bert_japanese import (
VOCAB_FILES_NAMES,
BertJapaneseTokenizer,
CharacterTokenizer,
JumanppTokenizer,
MecabTokenizer,
SudachiTokenizer,
WordpieceTokenizer,
)
from transformers.testing_utils import custom_tokenizers, require_jumanpp, require_sudachi
from ...test_tokenization_common import TokenizerTesterMixin
@custom_tokenizers
class __lowerCAmelCase ( UpperCAmelCase , unittest.TestCase ):
'''simple docstring'''
__lowerCamelCase : Tuple = BertJapaneseTokenizer
__lowerCamelCase : Optional[Any] = False
__lowerCamelCase : Optional[int] = True
def UpperCamelCase__ ( self: int ):
super().setUp()
UpperCamelCase_ =[
"[UNK]",
"[CLS]",
"[SEP]",
"こんにちは",
"こん",
"にちは",
"ばんは",
"##こん",
"##にちは",
"##ばんは",
"世界",
"##世界",
"、",
"##、",
"。",
"##。",
]
UpperCamelCase_ =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
def UpperCamelCase__ ( self: Optional[int] , UpperCamelCase_: Tuple ):
UpperCamelCase_ ="こんにちは、世界。 \nこんばんは、世界。"
UpperCamelCase_ ="こんにちは 、 世界 。 こんばんは 、 世界 。"
return input_text, output_text
def UpperCamelCase__ ( self: Tuple , UpperCamelCase_: Any ):
UpperCamelCase_ , UpperCamelCase_ =self.get_input_output_texts(UpperCamelCase_ )
UpperCamelCase_ =tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
UpperCamelCase_ =tokenizer.decode(UpperCamelCase_ , clean_up_tokenization_spaces=UpperCamelCase_ )
return text, ids
def UpperCamelCase__ ( self: Optional[int] ):
pass # TODO add if relevant
def UpperCamelCase__ ( self: int ):
pass # TODO add if relevant
def UpperCamelCase__ ( self: Union[str, Any] ):
pass # TODO add if relevant
def UpperCamelCase__ ( self: Union[str, Any] ):
UpperCamelCase_ =self.tokenizer_class(self.vocab_file )
UpperCamelCase_ =tokenizer.tokenize("こんにちは、世界。\nこんばんは、世界。" )
self.assertListEqual(UpperCamelCase_ , ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase_ ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
def UpperCamelCase__ ( self: Dict ):
UpperCamelCase_ =self.tokenizer_class(self.vocab_file , word_tokenizer_type="mecab" )
self.assertIsNotNone(UpperCamelCase_ )
UpperCamelCase_ ="こんにちは、世界。\nこんばんは、世界。"
UpperCamelCase_ =tokenizer.tokenize(UpperCamelCase_ )
self.assertListEqual(UpperCamelCase_ , ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase_ ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
UpperCamelCase_ =os.path.join(self.tmpdirname , "tokenizer.bin" )
with open(UpperCamelCase_ , "wb" ) as handle:
pickle.dump(UpperCamelCase_ , UpperCamelCase_ )
with open(UpperCamelCase_ , "rb" ) as handle:
UpperCamelCase_ =pickle.load(UpperCamelCase_ )
UpperCamelCase_ =tokenizer_new.tokenize(UpperCamelCase_ )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
def UpperCamelCase__ ( self: int ):
UpperCamelCase_ =MecabTokenizer(mecab_dic="ipadic" )
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"] , )
def UpperCamelCase__ ( self: Optional[Any] ):
try:
UpperCamelCase_ =MecabTokenizer(mecab_dic="unidic_lite" )
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"] , )
def UpperCamelCase__ ( self: List[str] ):
try:
UpperCamelCase_ =MecabTokenizer(mecab_dic="unidic" )
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"] , )
def UpperCamelCase__ ( self: Tuple ):
UpperCamelCase_ =MecabTokenizer(do_lower_case=UpperCamelCase_ , mecab_dic="ipadic" )
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップルストア", "で", "iphone", "8", "が", "発売", "さ", "れ", "た", "。"] , )
def UpperCamelCase__ ( self: Dict ):
try:
UpperCamelCase_ =MecabTokenizer(
do_lower_case=UpperCamelCase_ , normalize_text=UpperCamelCase_ , mecab_option="-d /usr/local/lib/mecab/dic/jumandic" )
except RuntimeError:
# if dict doesn't exist in the system, previous code raises this error.
return
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れた", "\u3000", "。"] , )
def UpperCamelCase__ ( self: Optional[int] ):
UpperCamelCase_ =MecabTokenizer(normalize_text=UpperCamelCase_ , mecab_dic="ipadic" )
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", " ", "。"] , )
@require_sudachi
def UpperCamelCase__ ( self: List[Any] ):
UpperCamelCase_ =self.tokenizer_class(self.vocab_file , word_tokenizer_type="sudachi" )
self.assertIsNotNone(UpperCamelCase_ )
UpperCamelCase_ ="こんにちは、世界。\nこんばんは、世界。"
UpperCamelCase_ =tokenizer.tokenize(UpperCamelCase_ )
self.assertListEqual(UpperCamelCase_ , ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase_ ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
UpperCamelCase_ =os.path.join(self.tmpdirname , "tokenizer.bin" )
with open(UpperCamelCase_ , "wb" ) as handle:
pickle.dump(UpperCamelCase_ , UpperCamelCase_ )
with open(UpperCamelCase_ , "rb" ) as handle:
UpperCamelCase_ =pickle.load(UpperCamelCase_ )
UpperCamelCase_ =tokenizer_new.tokenize(UpperCamelCase_ )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
@require_sudachi
def UpperCamelCase__ ( self: Union[str, Any] ):
UpperCamelCase_ =SudachiTokenizer(sudachi_dict_type="core" )
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , [" ", "\t", "アップル", "ストア", "で", "iPhone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", " ", "。", " ", " "] , )
@require_sudachi
def UpperCamelCase__ ( self: Tuple ):
UpperCamelCase_ =SudachiTokenizer(sudachi_dict_type="core" , sudachi_split_mode="A" )
self.assertListEqual(tokenizer.tokenize("外国人参政権" ) , ["外国", "人", "参政", "権"] )
@require_sudachi
def UpperCamelCase__ ( self: List[str] ):
UpperCamelCase_ =SudachiTokenizer(sudachi_dict_type="core" , sudachi_split_mode="B" )
self.assertListEqual(tokenizer.tokenize("外国人参政権" ) , ["外国人", "参政権"] )
@require_sudachi
def UpperCamelCase__ ( self: Tuple ):
UpperCamelCase_ =SudachiTokenizer(sudachi_dict_type="core" , sudachi_split_mode="C" )
self.assertListEqual(tokenizer.tokenize("外国人参政権" ) , ["外国人参政権"] )
@require_sudachi
def UpperCamelCase__ ( self: int ):
UpperCamelCase_ =SudachiTokenizer(do_lower_case=UpperCamelCase_ , sudachi_dict_type="core" )
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , [" ", "\t", "アップル", "ストア", "で", "iphone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", " ", "。", " ", " "] , )
@require_sudachi
def UpperCamelCase__ ( self: Optional[int] ):
UpperCamelCase_ =SudachiTokenizer(normalize_text=UpperCamelCase_ , sudachi_dict_type="core" )
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , [" ", "\t", "アップル", "ストア", "で", "iPhone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", "\u3000", "。", " ", " "] , )
@require_sudachi
def UpperCamelCase__ ( self: List[Any] ):
UpperCamelCase_ =SudachiTokenizer(trim_whitespace=UpperCamelCase_ , sudachi_dict_type="core" )
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"] , )
@require_jumanpp
def UpperCamelCase__ ( self: Union[str, Any] ):
UpperCamelCase_ =self.tokenizer_class(self.vocab_file , word_tokenizer_type="jumanpp" )
self.assertIsNotNone(UpperCamelCase_ )
UpperCamelCase_ ="こんにちは、世界。\nこんばんは、世界。"
UpperCamelCase_ =tokenizer.tokenize(UpperCamelCase_ )
self.assertListEqual(UpperCamelCase_ , ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase_ ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
UpperCamelCase_ =os.path.join(self.tmpdirname , "tokenizer.bin" )
with open(UpperCamelCase_ , "wb" ) as handle:
pickle.dump(UpperCamelCase_ , UpperCamelCase_ )
with open(UpperCamelCase_ , "rb" ) as handle:
UpperCamelCase_ =pickle.load(UpperCamelCase_ )
UpperCamelCase_ =tokenizer_new.tokenize(UpperCamelCase_ )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
@require_jumanpp
def UpperCamelCase__ ( self: Any ):
UpperCamelCase_ =JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップル", "ストア", "で", "iPhone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"] , )
@require_jumanpp
def UpperCamelCase__ ( self: Any ):
UpperCamelCase_ =JumanppTokenizer(do_lower_case=UpperCamelCase_ )
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップル", "ストア", "で", "iphone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"] , )
@require_jumanpp
def UpperCamelCase__ ( self: int ):
UpperCamelCase_ =JumanppTokenizer(normalize_text=UpperCamelCase_ )
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["ア", "ッ", "フ", "゚", "ル", "ストア", "で", "iPhone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"] , )
@require_jumanpp
def UpperCamelCase__ ( self: List[Any] ):
UpperCamelCase_ =JumanppTokenizer(trim_whitespace=UpperCamelCase_ )
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れた", "。"] , )
@require_jumanpp
def UpperCamelCase__ ( self: List[Any] ):
UpperCamelCase_ =JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize("ありがとうございますm(_ _)m見つけるのが大変です。" ) , ["ありがとう", "ございます", "m(_ _)m", "見つける", "の", "が", "大変です", "。"] , )
def UpperCamelCase__ ( self: Optional[Any] ):
UpperCamelCase_ =["[UNK]", "[CLS]", "[SEP]", "こんにちは", "こん", "にちは", "ばんは", "##こん", "##にちは", "##ばんは"]
UpperCamelCase_ ={}
for i, token in enumerate(UpperCamelCase_ ):
UpperCamelCase_ =i
UpperCamelCase_ =WordpieceTokenizer(vocab=UpperCamelCase_ , unk_token="[UNK]" )
self.assertListEqual(tokenizer.tokenize("" ) , [] )
self.assertListEqual(tokenizer.tokenize("こんにちは" ) , ["こんにちは"] )
self.assertListEqual(tokenizer.tokenize("こんばんは" ) , ["こん", "##ばんは"] )
self.assertListEqual(tokenizer.tokenize("こんばんは こんばんにちは こんにちは" ) , ["こん", "##ばんは", "[UNK]", "こんにちは"] )
def UpperCamelCase__ ( self: List[Any] ):
UpperCamelCase_ =BertJapaneseTokenizer.from_pretrained("nlp-waseda/roberta-base-japanese-with-auto-jumanpp" )
UpperCamelCase_ =tokenizer.subword_tokenizer
UpperCamelCase_ =subword_tokenizer.tokenize("国境 の 長い トンネル を 抜ける と 雪国 であった 。" )
self.assertListEqual(UpperCamelCase_ , ["▁国境", "▁の", "▁長い", "▁トンネル", "▁を", "▁抜ける", "▁と", "▁雪", "国", "▁であった", "▁。"] )
UpperCamelCase_ =subword_tokenizer.tokenize("こんばんは こんばん にち は こんにちは" )
self.assertListEqual(UpperCamelCase_ , ["▁こん", "ばん", "は", "▁こん", "ばん", "▁に", "ち", "▁は", "▁こんにちは"] )
def UpperCamelCase__ ( self: str ):
UpperCamelCase_ =self.tokenizer_class.from_pretrained("cl-tohoku/bert-base-japanese" )
UpperCamelCase_ =tokenizer.encode("ありがとう。" , add_special_tokens=UpperCamelCase_ )
UpperCamelCase_ =tokenizer.encode("どういたしまして。" , add_special_tokens=UpperCamelCase_ )
UpperCamelCase_ =tokenizer.build_inputs_with_special_tokens(UpperCamelCase_ )
UpperCamelCase_ =tokenizer.build_inputs_with_special_tokens(UpperCamelCase_ , UpperCamelCase_ )
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class __lowerCAmelCase ( UpperCAmelCase , unittest.TestCase ):
'''simple docstring'''
__lowerCamelCase : Tuple = BertJapaneseTokenizer
__lowerCamelCase : Optional[int] = False
def UpperCamelCase__ ( self: Any ):
super().setUp()
UpperCamelCase_ =["[UNK]", "[CLS]", "[SEP]", "こ", "ん", "に", "ち", "は", "ば", "世", "界", "、", "。"]
UpperCamelCase_ =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
def UpperCamelCase__ ( self: Dict , **UpperCamelCase_: List[Any] ):
return BertJapaneseTokenizer.from_pretrained(self.tmpdirname , subword_tokenizer_type="character" , **UpperCamelCase_ )
def UpperCamelCase__ ( self: Optional[int] , UpperCamelCase_: Dict ):
UpperCamelCase_ ="こんにちは、世界。 \nこんばんは、世界。"
UpperCamelCase_ ="こ ん に ち は 、 世 界 。 こ ん ば ん は 、 世 界 。"
return input_text, output_text
def UpperCamelCase__ ( self: int ):
pass # TODO add if relevant
def UpperCamelCase__ ( self: Tuple ):
pass # TODO add if relevant
def UpperCamelCase__ ( self: Dict ):
pass # TODO add if relevant
def UpperCamelCase__ ( self: Optional[int] ):
UpperCamelCase_ =self.tokenizer_class(self.vocab_file , subword_tokenizer_type="character" )
UpperCamelCase_ =tokenizer.tokenize("こんにちは、世界。 \nこんばんは、世界。" )
self.assertListEqual(
UpperCamelCase_ , ["こ", "ん", "に", "ち", "は", "、", "世", "界", "。", "こ", "ん", "ば", "ん", "は", "、", "世", "界", "。"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(UpperCamelCase_ ) , [3, 4, 5, 6, 7, 11, 9, 10, 12, 3, 4, 8, 4, 7, 11, 9, 10, 12] )
def UpperCamelCase__ ( self: Tuple ):
UpperCamelCase_ =["[UNK]", "[CLS]", "[SEP]", "こ", "ん", "に", "ち", "は", "ば", "世", "界", "、", "。"]
UpperCamelCase_ ={}
for i, token in enumerate(UpperCamelCase_ ):
UpperCamelCase_ =i
UpperCamelCase_ =CharacterTokenizer(vocab=UpperCamelCase_ , unk_token="[UNK]" )
self.assertListEqual(tokenizer.tokenize("" ) , [] )
self.assertListEqual(tokenizer.tokenize("こんにちは" ) , ["こ", "ん", "に", "ち", "は"] )
self.assertListEqual(tokenizer.tokenize("こんにちほ" ) , ["こ", "ん", "に", "ち", "[UNK]"] )
def UpperCamelCase__ ( self: Optional[Any] ):
UpperCamelCase_ =self.tokenizer_class.from_pretrained("cl-tohoku/bert-base-japanese-char" )
UpperCamelCase_ =tokenizer.encode("ありがとう。" , add_special_tokens=UpperCamelCase_ )
UpperCamelCase_ =tokenizer.encode("どういたしまして。" , add_special_tokens=UpperCamelCase_ )
UpperCamelCase_ =tokenizer.build_inputs_with_special_tokens(UpperCamelCase_ )
UpperCamelCase_ =tokenizer.build_inputs_with_special_tokens(UpperCamelCase_ , UpperCamelCase_ )
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase__ ( self: Dict ):
UpperCamelCase_ ="cl-tohoku/bert-base-japanese"
UpperCamelCase_ =AutoTokenizer.from_pretrained(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase__ ( self: List[str] ):
UpperCamelCase_ ="cl-tohoku/bert-base-japanese"
with self.assertLogs("transformers" , level="WARNING" ) as cm:
BertTokenizer.from_pretrained(UpperCamelCase_ )
self.assertTrue(
cm.records[0].message.startswith(
"The tokenizer class you load from this checkpoint is not the same type as the class this function"
" is called from." ) )
UpperCamelCase_ ="bert-base-cased"
with self.assertLogs("transformers" , level="WARNING" ) as cm:
BertJapaneseTokenizer.from_pretrained(UpperCamelCase_ )
self.assertTrue(
cm.records[0].message.startswith(
"The tokenizer class you load from this checkpoint is not the same type as the class this function"
" is called from." ) )
from __future__ import annotations

import inspect
import unittest

import numpy as np

from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers import TFResNetForImageClassification, TFResNetModel
    from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from PIL import Image

    from transformers import AutoImageProcessor


class TFResNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes)

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return ResNetConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
            image_size=self.image_size,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFResNetModel(config=config)
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = TFResNetForImageClassification(config)
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_tf
class TFResNetModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFResNetModel, "image-classification": TFResNetForImageClassification}
        if is_tf_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    has_attentions = False

    def setUp(self):
        self.model_tester = TFResNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ResNetConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="ResNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ResNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ResNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ["basic", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)

                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True

                check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFResNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_tf
@require_vision
class TFResNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-11.1069, -9.7877, -8.3777])

        self.assertTrue(np.allclose(outputs.logits[0, :3].numpy(), expected_slice, atol=1e-4))
import argparse
import json
import os

import fairseq
import torch
from fairseq.data import Dictionary

from transformers import (
    Wav2Vec2ConformerConfig,
    Wav2Vec2ConformerForCTC,
    Wav2Vec2ConformerForPreTraining,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
    logging,
)


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
    "post_extract_proj": "feature_projection.projection",
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "self_attn.linear_k": "encoder.layers.*.self_attn.linear_k",
    "self_attn.linear_v": "encoder.layers.*.self_attn.linear_v",
    "self_attn.linear_q": "encoder.layers.*.self_attn.linear_q",
    "self_attn.pos_bias_u": "encoder.layers.*.self_attn.pos_bias_u",
    "self_attn.pos_bias_v": "encoder.layers.*.self_attn.pos_bias_v",
    "self_attn.linear_out": "encoder.layers.*.self_attn.linear_out",
    "self_attn.linear_pos": "encoder.layers.*.self_attn.linear_pos",
    "self_attn.rotary_emb": "encoder.embed_positions",
    "self_attn_layer_norm": "encoder.layers.*.self_attn_layer_norm",
    "conv_module.pointwise_conv1": "encoder.layers.*.conv_module.pointwise_conv1",
    "conv_module.pointwise_conv2": "encoder.layers.*.conv_module.pointwise_conv2",
    "conv_module.depthwise_conv": "encoder.layers.*.conv_module.depthwise_conv",
    "conv_module.batch_norm": "encoder.layers.*.conv_module.batch_norm",
    "conv_module.layer_norm": "encoder.layers.*.conv_module.layer_norm",
    "ffn1.w_1": "encoder.layers.*.ffn1.intermediate_dense",
    "ffn1.w_2": "encoder.layers.*.ffn1.output_dense",
    "ffn1.layer_norm": "encoder.layers.*.ffn1_layer_norm",
    "ffn2.w_1": "encoder.layers.*.ffn2.intermediate_dense",
    "ffn2.w_2": "encoder.layers.*.ffn2.output_dense",
    "ffn2.layer_norm": "encoder.layers.*.ffn2_layer_norm",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
    "encoder.layer_norm": "encoder.layer_norm",
    "w2v_model.layer_norm": "feature_projection.layer_norm",
    "quantizer.weight_proj": "quantizer.weight_proj",
    "quantizer.vars": "quantizer.codevectors",
    "project_q": "project_q",
    "final_proj": "project_hid",
    "w2v_encoder.proj": "lm_head",
    "mask_emb": "masked_spec_embed",
}
TOP_LEVEL_KEYS = [
    "lm_head",
    "quantizer.weight_proj",
    "quantizer.codevectors",
    "project_q",
    "project_hid",
]


def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    elif weight_type == "inv_freq":
        hf_pointer.inv_freq.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")


def recursively_load_weights(fairseq_model, hf_model, is_headless):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.wav2vec2_conformer.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "wav2vec2_conformer." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "pos_bias_u" in name:
                        weight_type = None
                    elif "pos_bias_v" in name:
                        weight_type = None
                    elif "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    elif "running_mean" in name:
                        weight_type = "running_mean"
                    elif "inv_freq" in name:
                        weight_type = "inv_freq"
                    elif "running_var" in name:
                        weight_type = "running_var"
                    elif "num_batches_tracked" in name:
                        weight_type = "num_batches_tracked"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")


def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)


@torch.no_grad()
def convert_wav2vec2_conformer_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    if config_path is not None:
        config = Wav2Vec2ConformerConfig.from_pretrained(config_path, hidden_act="swish")
    else:
        config = Wav2Vec2ConformerConfig()

    if "rope" in checkpoint_path:
        config.position_embeddings_type = "rotary"

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices

            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 0
            vocab_dict["<s>"] = 1
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1,
                sampling_rate=16_000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_wav2vec = Wav2Vec2ConformerForCTC(config)
    else:
        hf_wav2vec = Wav2Vec2ConformerForPreTraining(config)

    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        task_arg = argparse.Namespace(task="audio_pretraining")
        task = fairseq.tasks.setup_task(task_arg)

        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path], task=task)

    model = model[0].eval()

    recursively_load_weights(model, hf_wav2vec, not is_finetuned)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
    )
    args = parser.parse_args()
    convert_wav2vec2_conformer_checkpoint(
        args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
    )
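
# Example invocation (the script file name and all paths are placeholders;
# the flag names come from the argparse definition above):
#
#   python convert_wav2vec2_conformer_original_pytorch_checkpoint_to_pytorch.py \
#       --checkpoint_path /path/to/fairseq_checkpoint.pt \
#       --dict_path /path/to/dict.ltr.txt \
#       --pytorch_dump_folder_path ./wav2vec2-conformer-converted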
'''simple docstring'''
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class AutoImageProcessorTest(unittest.TestCase):
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    def test_image_processor_from_model_shortcut(self):
        config = AutoImageProcessor.from_pretrained("openai/clip-vit-base-patch32")
        self.assertIsInstance(config, CLIPImageProcessor)

    def test_image_processor_from_local_directory_from_key(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            config_tmpfile = Path(tmpdirname) / "config.json"
            json.dump(
                {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )
            json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))

            config = AutoImageProcessor.from_pretrained(tmpdirname)
            self.assertIsInstance(config, CLIPImageProcessor)

    def test_image_processor_from_local_directory_from_feature_extractor_key(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            config_tmpfile = Path(tmpdirname) / "config.json"
            json.dump(
                {"feature_extractor_type": "CLIPFeatureExtractor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )
            json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))

            config = AutoImageProcessor.from_pretrained(tmpdirname)
            self.assertIsInstance(config, CLIPImageProcessor)

    def test_image_processor_from_local_directory_from_config(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = CLIPConfig()

            # Create a dummy config file with image_processor_type
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            config_tmpfile = Path(tmpdirname) / "config.json"
            json.dump(
                {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )
            json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))

            # remove image_processor_type to make sure config.json alone is enough to load image processor locally
            config_dict = AutoImageProcessor.from_pretrained(tmpdirname).to_dict()

            config_dict.pop("image_processor_type")
            config = CLIPImageProcessor(**config_dict)

            # save in new folder
            model_config.save_pretrained(tmpdirname)
            config.save_pretrained(tmpdirname)

            config = AutoImageProcessor.from_pretrained(tmpdirname)

            # make sure private variable is not incorrectly saved
            dict_as_saved = json.loads(config.to_json_string())
            self.assertTrue("_processor_class" not in dict_as_saved)

        self.assertIsInstance(config, CLIPImageProcessor)

    def test_image_processor_from_local_file(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            json.dump(
                {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )

            config = AutoImageProcessor.from_pretrained(processor_tmpfile)
            self.assertIsInstance(config, CLIPImageProcessor)

    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "clip-base is not a local folder and is not a valid model identifier"
        ):
            _ = AutoImageProcessor.from_pretrained("clip-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = AutoImageProcessor.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_image_processor_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.",
        ):
            _ = AutoImageProcessor.from_pretrained("hf-internal-testing/config-no-model")

    def test_from_pretrained_dynamic_image_processor(self):
        # If remote code is not trusted, loading this dynamic image processor should fail.
        with self.assertRaises(ValueError):
            image_processor = AutoImageProcessor.from_pretrained("hf-internal-testing/test_dynamic_image_processor")
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            image_processor = AutoImageProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=False
            )

        image_processor = AutoImageProcessor.from_pretrained(
            "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=True
        )
        self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")

        # Test image processor can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(tmp_dir)
            reloaded_image_processor = AutoImageProcessor.from_pretrained(tmp_dir, trust_remote_code=True)
        self.assertEqual(reloaded_image_processor.__class__.__name__, "NewImageProcessor")

    def test_new_image_processor_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)
            AutoImageProcessor.register(CustomConfig, CustomImageProcessor)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoImageProcessor.register(CLIPConfig, CLIPImageProcessor)

            with tempfile.TemporaryDirectory() as tmpdirname:
                processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
                config_tmpfile = Path(tmpdirname) / "config.json"
                json.dump(
                    {"feature_extractor_type": "CLIPFeatureExtractor", "processor_class": "CLIPProcessor"},
                    open(processor_tmpfile, "w"),
                )
                json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))

                image_processor = CustomImageProcessor.from_pretrained(tmpdirname)

            # Now that the config is registered, it can be used as any other config with the auto-API
            with tempfile.TemporaryDirectory() as tmp_dir:
                image_processor.save_pretrained(tmp_dir)
                new_image_processor = AutoImageProcessor.from_pretrained(tmp_dir)
                self.assertIsInstance(new_image_processor, CustomImageProcessor)

        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
                del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]

    def test_from_pretrained_dynamic_image_processor_conflict(self):
        class NewImageProcessor(CLIPImageProcessor):
            is_local = True

        try:
            AutoConfig.register("custom", CustomConfig)
            AutoImageProcessor.register(CustomConfig, NewImageProcessor)
            # If remote code is not set, the default is to use local
            image_processor = AutoImageProcessor.from_pretrained("hf-internal-testing/test_dynamic_image_processor")
            self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")
            self.assertTrue(image_processor.is_local)

            # If remote code is disabled, we load the local one.
            image_processor = AutoImageProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=False
            )
            self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")
            self.assertTrue(image_processor.is_local)

            # If remote is enabled, we load from the Hub
            image_processor = AutoImageProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=True
            )
            self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")
            self.assertTrue(not hasattr(image_processor, "is_local"))

        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
                del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
| 13
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_nllb"] = ["NllbTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_nllb_fast"] = ["NllbTokenizerFast"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb import NllbTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb_fast import NllbTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 545
| 0
|
'''simple docstring'''
def decimal_to_fraction(decimal: float | str) -> tuple[int, int]:
    try:
        decimal = float(decimal)
    except ValueError:
        raise ValueError("Please enter a valid number")
    fractional_part = decimal - int(decimal)
    if fractional_part == 0:
        return int(decimal), 1
    else:
        number_of_frac_digits = len(str(decimal).split(".")[1])
        numerator = int(decimal * (10**number_of_frac_digits))
        denominator = 10**number_of_frac_digits
        # reduce the fraction with the Euclidean algorithm (GCD of numerator and denominator)
        divisor, dividend = denominator, numerator
        while True:
            remainder = dividend % divisor
            if remainder == 0:
                break
            dividend, divisor = divisor, remainder
        numerator, denominator = numerator / divisor, denominator / divisor
        return int(numerator), int(denominator)
if __name__ == "__main__":
print(f"""{decimal_to_fraction(2) = }""")
print(f"""{decimal_to_fraction(89.0) = }""")
print(f"""{decimal_to_fraction('67') = }""")
print(f"""{decimal_to_fraction('45.0') = }""")
print(f"""{decimal_to_fraction(1.5) = }""")
print(f"""{decimal_to_fraction('6.25') = }""")
print(f"""{decimal_to_fraction('78td') = }""")
| 474
|
'''simple docstring'''
import logging
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEncoder,
BertModel,
BertPreTrainedModel,
)
logger = logging.getLogger(__name__)
class BertEncoderWithPabee(BertEncoder):
    def adaptive_forward(self, hidden_states, current_layer, attention_mask=None, head_mask=None):
        layer_outputs = self.layer[current_layer](hidden_states, attention_mask, head_mask[current_layer])

        hidden_states = layer_outputs[0]

        return hidden_states
@add_start_docstrings(
    "The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top.",
    BERT_START_DOCSTRING,
)
class BertModelWithPabee(BertModel):
    def __init__(self, config):
        super().__init__(config)

        self.encoder = BertEncoderWithPabee(config)

        self.init_weights()
        self.patience = 0
        self.inference_instances_num = 0
        self.inference_layers_num = 0

        self.regression_threshold = 0

    def set_regression_threshold(self, threshold):
        self.regression_threshold = threshold

    def set_patience(self, patience):
        self.patience = patience

    def reset_stats(self):
        self.inference_instances_num = 0
        self.inference_layers_num = 0

    def log_stats(self):
        avg_inf_layers = self.inference_layers_num / self.inference_instances_num
        message = (
            f"*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up ="
            f" {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***"
        )
        print(message)
    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        output_dropout=None,
        output_layers=None,
        regression=False,
    ):
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        device = input_ids.device if input_ids is not None else inputs_embeds.device

        if attention_mask is None:
            attention_mask = torch.ones(input_shape, device=device)
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)

        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape, device)

        # If a 2D or 3D attention mask is provided for the cross-attention
        # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if self.config.is_decoder and encoder_hidden_states is not None:
            encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
            encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
            if encoder_attention_mask is None:
                encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
            encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
        else:
            encoder_extended_attention_mask = None

        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        embedding_output = self.embeddings(
            input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds
        )
        encoder_outputs = embedding_output

        if self.training:
            res = []
            for i in range(self.config.num_hidden_layers):
                encoder_outputs = self.encoder.adaptive_forward(
                    encoder_outputs, current_layer=i, attention_mask=extended_attention_mask, head_mask=head_mask
                )

                pooled_output = self.pooler(encoder_outputs)
                logits = output_layers[i](output_dropout(pooled_output))
                res.append(logits)
        elif self.patience == 0:  # Use all layers for inference
            encoder_outputs = self.encoder(
                embedding_output,
                attention_mask=extended_attention_mask,
                head_mask=head_mask,
                encoder_hidden_states=encoder_hidden_states,
                encoder_attention_mask=encoder_extended_attention_mask,
            )
            pooled_output = self.pooler(encoder_outputs[0])
            res = [output_layers[self.config.num_hidden_layers - 1](pooled_output)]
        else:
            patient_counter = 0
            patient_result = None
            calculated_layer_num = 0
            for i in range(self.config.num_hidden_layers):
                calculated_layer_num += 1
                encoder_outputs = self.encoder.adaptive_forward(
                    encoder_outputs, current_layer=i, attention_mask=extended_attention_mask, head_mask=head_mask
                )

                pooled_output = self.pooler(encoder_outputs)
                logits = output_layers[i](pooled_output)

                if regression:
                    labels = logits.detach()
                    if patient_result is not None:
                        patient_labels = patient_result.detach()
                    if (patient_result is not None) and torch.abs(patient_labels - labels) < self.regression_threshold:
                        patient_counter += 1
                    else:
                        patient_counter = 0
                else:
                    labels = logits.detach().argmax(dim=1)
                    if patient_result is not None:
                        patient_labels = patient_result.detach().argmax(dim=1)
                    if (patient_result is not None) and torch.all(labels.eq(patient_labels)):
                        patient_counter += 1
                    else:
                        patient_counter = 0

                patient_result = logits
                if patient_counter == self.patience:
                    break
            res = [patient_result]
            self.inference_layers_num += calculated_layer_num
            self.inference_instances_num += 1

        return res
@add_start_docstrings(
    """Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of
    the pooled output) e.g. for GLUE tasks. """,
    BERT_START_DOCSTRING,
)
class BertForSequenceClassificationWithPabee(BertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels

        self.bert = BertModelWithPabee(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifiers = nn.ModuleList(
            [nn.Linear(config.hidden_size, self.config.num_labels) for _ in range(config.num_hidden_layers)]
        )

        self.init_weights()

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
    ):
        logits = self.bert(
            input_ids=input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_dropout=self.dropout,
            output_layers=self.classifiers,
            regression=self.num_labels == 1,
        )

        outputs = (logits[-1],)

        if labels is not None:
            total_loss = None
            total_weights = 0
            for ix, logits_item in enumerate(logits):
                if self.num_labels == 1:
                    #  We are doing regression
                    loss_fct = MSELoss()
                    loss = loss_fct(logits_item.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    loss = loss_fct(logits_item.view(-1, self.num_labels), labels.view(-1))
                if total_loss is None:
                    total_loss = loss
                else:
                    total_loss += loss * (ix + 1)
                total_weights += ix + 1
            outputs = (total_loss / total_weights,) + outputs

        return outputs
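# Minimal usage sketch (the checkpoint name below is a hypothetical placeholder; the full
# training setup lives in the accompanying run_glue_with_pabee script). Inference-time
# early exit is controlled through the patience counter on the underlying BertModelWithPabee:
#
#   model = BertForSequenceClassificationWithPabee.from_pretrained("bert-base-uncased", num_labels=2)
#   model.bert.set_patience(3)   # stop as soon as 3 consecutive layers agree on the prediction
#   model.bert.reset_stats()
#   ...                          # run evaluation batches through model(...)
#   model.bert.log_stats()       # prints the average number of executed layers / speed-up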
| 474
| 1
|
'''simple docstring'''
def z_function(input_str: str) -> list[int]:
    z_result = [0 for i in range(len(input_str))]

    # initialize interval's left pointer and right pointer
    left_pointer, right_pointer = 0, 0

    for i in range(1, len(input_str)):
        # case when current index is inside the interval
        if i <= right_pointer:
            min_edge = min(right_pointer - i + 1, z_result[i - left_pointer])
            z_result[i] = min_edge

        while go_next(i, z_result, input_str):
            z_result[i] += 1

        # if new index's result gives us more right interval,
        # we have to update left_pointer and right_pointer
        if i + z_result[i] - 1 > right_pointer:
            left_pointer, right_pointer = i, i + z_result[i] - 1

    return z_result


def go_next(i: int, z_result: list[int], s: str) -> bool:
    return i + z_result[i] < len(s) and s[z_result[i]] == s[i + z_result[i]]


def find_pattern(pattern: str, input_str: str) -> int:
    answer = 0
    # concatenate 'pattern' and 'input_str' and call z_function
    # with concatenated string
    z_result = z_function(pattern + input_str)

    for val in z_result:
        # if the value is greater than or equal to the length of the pattern string,
        # that means this index is the starting position of a substring
        # which is equal to the pattern string
        if val >= len(pattern):
            answer += 1

    return answer
if __name__ == "__main__":
import doctest
doctest.testmod()
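# Worked example (illustrative, consistent with the implementation above, which keeps
# z_result[0] == 0 by convention):
#
#   z_function("abacaba") == [0, 0, 1, 0, 3, 0, 1]
#   find_pattern("aba", "abacaba") == 2   # "aba" occurs at indices 0 and 4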
| 596
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DPR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json'
),
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json'
),
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json'
),
}
class DPRConfig(PretrainedConfig):
    model_type = "dpr"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        projection_dim: int = 0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.projection_dim = projection_dim
        self.position_embedding_type = position_embedding_type
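# Illustrative sketch: the DPR context encoder, question encoder and reader all share this
# config schema, so a default instance mirrors BERT-base hyperparameters:
#
#   config = DPRConfig(projection_dim=0)
#   assert config.hidden_size == 768 and config.num_hidden_layers == 12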
| 596
| 1
|
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor
class ChineseCLIPImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.48145466, 0.4578275, 0.40821073],
        image_std=[0.26862954, 0.26130258, 0.27577711],
        do_convert_rgb=True,
    ):
        size = size if size is not None else {"height": 224, "width": 224}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_convert_rgb = do_convert_rgb

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_convert_rgb": self.do_convert_rgb,
        }

    def prepare_inputs(self, equal_resolution=False, numpify=False, torchify=False):
        assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time"

        if equal_resolution:
            image_inputs = []
            for i in range(self.batch_size):
                image_inputs.append(
                    np.random.randint(
                        255, size=(self.num_channels, self.max_resolution, self.max_resolution), dtype=np.uint8
                    )
                )
        else:
            image_inputs = []
            for i in range(self.batch_size):
                width, height = np.random.choice(np.arange(self.min_resolution, self.max_resolution), 2)
                image_inputs.append(np.random.randint(255, size=(self.num_channels, width, height), dtype=np.uint8))

        if not numpify and not torchify:
            # PIL expects the channel dimension as last dimension
            image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]

        if torchify:
            image_inputs = [torch.from_numpy(x) for x in image_inputs]

        return image_inputs
@require_torch
@require_vision
class ChineseCLIPImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ChineseCLIPImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ChineseCLIPImageProcessingTester(self, do_center_crop=True)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_convert_rgb"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 224, "width": 224})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
@require_torch
@require_vision
class ChineseCLIPImageProcessingTestFourChannels(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ChineseCLIPImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ChineseCLIPImageProcessingTester(self, num_channels=4, do_center_crop=True)
        self.expected_encoded_image_num_channels = 3

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_convert_rgb"))

    def test_batch_feature(self):
        pass

    def test_call_pil_four_channels(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.expected_encoded_image_num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.expected_encoded_image_num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
| 717
|
import math
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"facebook/data2vec-base-960h": "https://huggingface.co/facebook/data2vec-audio-base-960h/resolve/main/config.json",
# See all Data2VecAudio models at https://huggingface.co/models?filter=data2vec-audio
}
class Data2VecAudioConfig(PretrainedConfig):
    model_type = "data2vec-audio"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embedding_groups=16,
        conv_pos_kernel_size=19,
        num_conv_pos_embeddings=5,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        ctc_loss_reduction="sum",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1500),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        add_adapter=False,
        adapter_kernel_size=3,
        adapter_stride=2,
        num_adapter_layers=3,
        output_hidden_size=None,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.conv_pos_kernel_size = conv_pos_kernel_size
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.use_weighted_layer_sum = use_weighted_layer_sum

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def inputs_to_logits_ratio(self):
        return math.prod(self.conv_stride)
| 169
| 0
|
'''simple docstring'''
from __future__ import annotations
from decimal import Decimal
from math import * # noqa: F403
from sympy import diff
def newton_raphson(func: str, a: float | Decimal, precision: float = 10**-10) -> float:
    # `func` is a SymPy-differentiable expression string in the variable x;
    # eval() picks up the local variable x when evaluating it.
    x = a
    while True:
        x = Decimal(x) - (
            Decimal(eval(func)) / Decimal(eval(str(diff(func))))  # noqa: S307
        )
        # This number dictates the accuracy of the answer
        if abs(eval(func)) < precision:  # noqa: S307
            return float(x)
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(f"""The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}""")
# Find root of polynomial
print(f"""The root of x**2 - 5*x + 2 = 0 is {newton_raphson('x**2 - 5*x + 2', 0.4)}""")
    # Find root of logarithmic function (the root of log(x) - 1 = 0 is e)
print(f"""The root of log(x) - 1 = 0 is {newton_raphson('log(x) - 1', 2)}""")
# Exponential Roots
print(f"""The root of exp(x) - 1 = 0 is {newton_raphson('exp(x) - 1', 0)}""")
| 489
|
"""simple docstring"""
def jaro_winkler(str1: str, str2: str) -> float:
    def get_matched_characters(_str1: str, _str2: str) -> str:
        matched = []
        limit = min(len(_str1), len(_str2)) // 2
        for i, l in enumerate(_str1):
            left = int(max(0, i - limit))
            right = int(min(i + limit + 1, len(_str2)))
            if l in _str2[left:right]:
                matched.append(l)
                _str2 = f"{_str2[0:_str2.index(l)]} {_str2[_str2.index(l) + 1:]}"

        return "".join(matched)

    # matching characters
    matching_1 = get_matched_characters(str1, str2)
    matching_2 = get_matched_characters(str2, str1)
    match_count = len(matching_1)

    # transposition
    transpositions = (
        len([(c1, c2) for c1, c2 in zip(matching_1, matching_2) if c1 != c2]) // 2
    )

    if not match_count:
        jaro = 0.0
    else:
        jaro = (
            1
            / 3
            * (
                match_count / len(str1)
                + match_count / len(str2)
                + (match_count - transpositions) / match_count
            )
        )

    # common prefix up to 4 characters
    prefix_len = 0
    for c1, c2 in zip(str1[:4], str2[:4]):
        if c1 == c2:
            prefix_len += 1
        else:
            break

    return jaro + 0.1 * prefix_len * (1 - jaro)
if __name__ == "__main__":
import doctest
doctest.testmod()
print(jaro_winkler("""hello""", """world"""))
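# Two boundary cases that follow directly from the definition (illustrative checks):
#
#   jaro_winkler("hello", "hello") == 1.0   # identical strings: jaro = 1, prefix bonus adds 0
#   jaro_winkler("", "") == 0.0             # no matched characters, so match_count == 0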
| 177
| 0
|
def hamming_distance(string1: str, string2: str) -> int:
    if len(string1) != len(string2):
        raise ValueError("String lengths must match!")

    count = 0

    for char1, char2 in zip(string1, string2):
        if char1 != char2:
            count += 1

    return count
if __name__ == "__main__":
import doctest
doctest.testmod()
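# Classic worked example (illustrative; "karolin" and "kathrin" differ at three positions):
#
#   assert hamming_distance("karolin", "kathrin") == 3
#   assert hamming_distance("python", "python") == 0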
| 712
|
'''simple docstring'''
import time
import warnings
from abc import ABC
from copy import deepcopy
from typing import Optional
import torch
from ..utils import add_start_docstrings, logging
logger = logging.get_logger(__name__)

STOPPING_CRITERIA_INPUTS_DOCSTRING = r"""
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
scores (`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`):
Prediction scores of a language modeling head. These can be scores for each vocabulary token before SoftMax
or scores for each vocabulary token after SoftMax.
kwargs (`Dict[str, Any]`, *optional*):
Additional stopping criteria specific kwargs.
Return:
`bool`. `False` indicates we should continue, `True` indicates we should stop.
"""
class StoppingCriteria(ABC):
    """Abstract base class for all stopping criteria that can be applied during generation."""

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        raise NotImplementedError("StoppingCriteria needs to be subclassed")


class MaxLengthCriteria(StoppingCriteria):
    def __init__(self, max_length: int, max_position_embeddings: Optional[int] = None):
        self.max_length = max_length
        self.max_position_embeddings = max_position_embeddings

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        cur_len = input_ids.shape[-1]
        is_done = cur_len >= self.max_length
        if self.max_position_embeddings is not None and not is_done and cur_len >= self.max_position_embeddings:
            logger.warning_once(
                "This is a friendly reminder - the current text generation call will exceed the model's predefined "
                f"maximum length ({self.max_position_embeddings}). Depending on the model, you may observe "
                "exceptions, performance degradation, or nothing at all."
            )
        return is_done


class MaxNewTokensCriteria(StoppingCriteria):
    def __init__(self, start_length: int, max_new_tokens: int):
        warnings.warn(
            "The class `MaxNewTokensCriteria` is deprecated. "
            f"Please use `MaxLengthCriteria(max_length={start_length + max_new_tokens})` "
            "with `max_length = start_length + max_new_tokens` instead.",
            FutureWarning,
        )
        self.start_length = start_length
        self.max_new_tokens = max_new_tokens
        self.max_length = start_length + max_new_tokens

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        return input_ids.shape[-1] >= self.max_length


class MaxTimeCriteria(StoppingCriteria):
    def __init__(self, max_time: float, initial_timestamp: Optional[float] = None):
        self.max_time = max_time
        self.initial_timestamp = time.time() if initial_timestamp is None else initial_timestamp

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        return time.time() - self.initial_timestamp > self.max_time


class StoppingCriteriaList(list):
    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        return any(criteria(input_ids, scores) for criteria in self)

    @property
    def max_length(self) -> Optional[int]:
        for stopping_criterium in self:
            if isinstance(stopping_criterium, MaxLengthCriteria):
                return stopping_criterium.max_length
            elif isinstance(stopping_criterium, MaxNewTokensCriteria):
                return stopping_criterium.max_length
        return None


def validate_stopping_criteria(stopping_criteria: StoppingCriteriaList, max_length: int) -> StoppingCriteriaList:
    stopping_max_length = stopping_criteria.max_length
    new_stopping_criteria = deepcopy(stopping_criteria)
    if stopping_max_length is not None and stopping_max_length != max_length:
        warnings.warn("You set different `max_length` for stopping criteria and `max_length` parameter", UserWarning)
    elif stopping_max_length is None:
        new_stopping_criteria.append(MaxLengthCriteria(max_length=max_length))
    return new_stopping_criteria
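# Minimal usage sketch (assumed typical generation-loop wiring; `input_ids` and `scores`
# would come from a real decoding loop):
#
#   criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=50), MaxTimeCriteria(max_time=5.0)])
#   ...
#   if criteria(input_ids, scores):   # True as soon as any single criterion fires
#       break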
| 204
| 0
|
"""simple docstring"""
import random
from typing import Any
def fisher_yates_shuffle(data: list) -> list[Any]:
    for _ in range(len(data)):
        a = random.randint(0, len(data) - 1)
        b = random.randint(0, len(data) - 1)
        data[a], data[b] = data[b], data[a]
    return data


if __name__ == "__main__":
    integers = [0, 1, 2, 3, 4, 5, 6, 7]
    strings = ["python", "says", "hello", "!"]
    print("Fisher-Yates Shuffle:")
    print("List", integers, strings)
    print("FY Shuffle", fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
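# Note: the variant above applies len(data) random transpositions, which is simple but does
# not yield a uniform distribution over permutations. For comparison, a minimal sketch of
# the classical Fisher-Yates (Knuth) shuffle, which is uniform; the name below is chosen
# here for illustration and is not part of the original module:
def fisher_yates_shuffle_canonical(data: list) -> list:
    for i in range(len(data) - 1, 0, -1):
        # pick a swap partner only from the not-yet-finalized prefix [0, i]
        j = random.randint(0, i)
        data[i], data[j] = data[j], data[i]
    return data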
| 530
|
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class UnCLIPTextProjModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        *,
        clip_extra_context_tokens: int = 4,
        clip_embeddings_dim: int = 768,
        time_embed_dim: int,
        cross_attention_dim,
    ):
        super().__init__()

        self.learned_classifier_free_guidance_embeddings = nn.Parameter(torch.zeros(clip_embeddings_dim))

        # parameters for additional clip time embeddings
        self.embedding_proj = nn.Linear(clip_embeddings_dim, time_embed_dim)
        self.clip_image_embeddings_project_to_time_embeddings = nn.Linear(clip_embeddings_dim, time_embed_dim)

        # parameters for encoder hidden states
        self.clip_extra_context_tokens = clip_extra_context_tokens
        self.clip_extra_context_tokens_proj = nn.Linear(
            clip_embeddings_dim, self.clip_extra_context_tokens * cross_attention_dim
        )
        self.encoder_hidden_states_proj = nn.Linear(clip_embeddings_dim, cross_attention_dim)
        self.text_encoder_hidden_states_norm = nn.LayerNorm(cross_attention_dim)

    def forward(self, *, image_embeddings, prompt_embeds, text_encoder_hidden_states, do_classifier_free_guidance):
        if do_classifier_free_guidance:
            # Add the classifier free guidance embeddings to the image embeddings
            image_embeddings_batch_size = image_embeddings.shape[0]
            classifier_free_guidance_embeddings = self.learned_classifier_free_guidance_embeddings.unsqueeze(0)
            classifier_free_guidance_embeddings = classifier_free_guidance_embeddings.expand(
                image_embeddings_batch_size, -1
            )
            image_embeddings = torch.cat([classifier_free_guidance_embeddings, image_embeddings], dim=0)

        # The image embeddings batch size and the text embeddings batch size are equal
        assert image_embeddings.shape[0] == prompt_embeds.shape[0]

        batch_size = prompt_embeds.shape[0]

        # "Specifically, we modify the architecture described in Nichol et al. (2021) by projecting and
        # adding CLIP embeddings to the existing timestep embedding, ...
        time_projected_prompt_embeds = self.embedding_proj(prompt_embeds)
        time_projected_image_embeddings = self.clip_image_embeddings_project_to_time_embeddings(image_embeddings)
        additive_clip_time_embeddings = time_projected_image_embeddings + time_projected_prompt_embeds

        # ... and by projecting CLIP embeddings into four
        # extra tokens of context that are concatenated to the sequence of outputs from the GLIDE text encoder"
        clip_extra_context_tokens = self.clip_extra_context_tokens_proj(image_embeddings)
        clip_extra_context_tokens = clip_extra_context_tokens.reshape(batch_size, -1, self.clip_extra_context_tokens)
        clip_extra_context_tokens = clip_extra_context_tokens.permute(0, 2, 1)

        text_encoder_hidden_states = self.encoder_hidden_states_proj(text_encoder_hidden_states)
        text_encoder_hidden_states = self.text_encoder_hidden_states_norm(text_encoder_hidden_states)
        text_encoder_hidden_states = torch.cat([clip_extra_context_tokens, text_encoder_hidden_states], dim=1)

        return text_encoder_hidden_states, additive_clip_time_embeddings
| 534
| 0
|
'''simple docstring'''
def excel_title_to_column(column_title: str) -> int:
    assert column_title.isupper()
    answer = 0
    index = len(column_title) - 1
    power = 0

    while index >= 0:
        value = (ord(column_title[index]) - 64) * pow(26, power)
        answer += value
        power += 1
        index -= 1

    return answer
if __name__ == "__main__":
from doctest import testmod
testmod()
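# Worked examples (illustrative): column titles are base-26 with digits A..Z mapping to
# 1..26, so "AB" = 1 * 26 + 2 and "ZZ" = 26 * 26 + 26:
#
#   assert excel_title_to_column("A") == 1
#   assert excel_title_to_column("AB") == 28
#   assert excel_title_to_column("ZZ") == 702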
| 435
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'facebook/timesformer': 'https://huggingface.co/facebook/timesformer/resolve/main/config.json',
}
class TimesformerConfig(PretrainedConfig):
    model_type = "timesformer"

    def __init__(
        self,
        image_size=224,
        patch_size=16,
        num_channels=3,
        num_frames=8,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-6,
        qkv_bias=True,
        attention_type="divided_space_time",
        drop_path_rate=0,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.num_frames = num_frames

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.qkv_bias = qkv_bias

        self.attention_type = attention_type
        self.drop_path_rate = drop_path_rate
| 1
|
values = {
0: '0',
1: '1',
2: '2',
3: '3',
4: '4',
5: '5',
6: '6',
7: '7',
8: '8',
9: '9',
1_0: 'a',
1_1: 'b',
1_2: 'c',
1_3: 'd',
1_4: 'e',
1_5: 'f',
}
def decimal_to_hexadecimal(decimal):
    assert type(decimal) in (int, float) and decimal == int(decimal)
    decimal = int(decimal)
    hexadecimal = ""
    negative = False
    if decimal < 0:
        negative = True
        decimal *= -1
    while decimal > 0:
        decimal, remainder = divmod(decimal, 16)
        hexadecimal = values[remainder] + hexadecimal
    hexadecimal = "0x" + hexadecimal
    if negative:
        hexadecimal = "-" + hexadecimal
    return hexadecimal
if __name__ == "__main__":
import doctest
doctest.testmod()
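# Worked examples (illustrative):
#
#   assert decimal_to_hexadecimal(255) == "0xff"
#   assert decimal_to_hexadecimal(-256) == "-0x100"
#   assert decimal_to_hexadecimal(16) == hex(16)   # matches the builtin for positive ints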
| 99
|
import inspect
import unittest
from transformers import YolosConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import YolosForObjectDetection, YolosModel
from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class _lowerCamelCase :
"""simple docstring"""
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=1_3 , __SCREAMING_SNAKE_CASE=[3_0, 3_0] , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=3_2 , __SCREAMING_SNAKE_CASE=5 , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE=3_7 , __SCREAMING_SNAKE_CASE="gelu" , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=1_0 , __SCREAMING_SNAKE_CASE=0.02 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=8 , __SCREAMING_SNAKE_CASE=1_0 , ) -> List[str]:
"""simple docstring"""
UpperCamelCase__ : Any = parent
UpperCamelCase__ : Optional[Any] = batch_size
UpperCamelCase__ : str = image_size
UpperCamelCase__ : Union[str, Any] = patch_size
UpperCamelCase__ : Union[str, Any] = num_channels
UpperCamelCase__ : Tuple = is_training
UpperCamelCase__ : Optional[int] = use_labels
UpperCamelCase__ : Optional[int] = hidden_size
UpperCamelCase__ : Any = num_hidden_layers
UpperCamelCase__ : Optional[Any] = num_attention_heads
UpperCamelCase__ : int = intermediate_size
UpperCamelCase__ : Tuple = hidden_act
UpperCamelCase__ : int = hidden_dropout_prob
UpperCamelCase__ : str = attention_probs_dropout_prob
UpperCamelCase__ : str = type_sequence_label_size
UpperCamelCase__ : str = initializer_range
UpperCamelCase__ : Dict = num_labels
UpperCamelCase__ : List[Any] = scope
UpperCamelCase__ : str = n_targets
UpperCamelCase__ : int = num_detection_tokens
# we set the expected sequence length (which is used in several tests)
# expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens
UpperCamelCase__ : int = (image_size[1] // patch_size) * (image_size[0] // patch_size)
UpperCamelCase__ : Any = num_patches + 1 + self.num_detection_tokens
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
"""simple docstring"""
UpperCamelCase__ : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]] )
UpperCamelCase__ : int = None
if self.use_labels:
# labels is a list of Dict (each Dict being the labels for a given example in the batch)
UpperCamelCase__ : Union[str, Any] = []
for i in range(self.batch_size ):
UpperCamelCase__ : Optional[Any] = {}
UpperCamelCase__ : str = torch.randint(
high=self.num_labels , size=(self.n_targets,) , device=__SCREAMING_SNAKE_CASE )
UpperCamelCase__ : List[Any] = torch.rand(self.n_targets , 4 , device=__SCREAMING_SNAKE_CASE )
labels.append(__SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Any = self.get_config()
return config, pixel_values, labels
def __SCREAMING_SNAKE_CASE ( self ) -> List[str]:
"""simple docstring"""
return YolosConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , num_detection_tokens=self.num_detection_tokens , num_labels=self.num_labels , )
def __SCREAMING_SNAKE_CASE ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> List[Any]:
"""simple docstring"""
UpperCamelCase__ : Dict = YolosModel(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
UpperCamelCase__ : Optional[Any] = model(__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.expected_seq_len, self.hidden_size) )
def __SCREAMING_SNAKE_CASE ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> str:
"""simple docstring"""
UpperCamelCase__ : Optional[int] = YolosForObjectDetection(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
UpperCamelCase__ : str = model(pixel_values=__SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Union[str, Any] = model(__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) )
self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) )
UpperCamelCase__ : Optional[Any] = model(pixel_values=__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) )
self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase__ : str = self.prepare_config_and_inputs()
UpperCamelCase__ ,UpperCamelCase__ ,UpperCamelCase__ : Union[str, Any] = config_and_inputs
UpperCamelCase__ : Union[str, Any] = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class _lowerCamelCase ( UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = (YolosModel, YolosForObjectDetection) if is_torch_available() else ()
SCREAMING_SNAKE_CASE_ = (
{'''feature-extraction''': YolosModel, '''object-detection''': YolosForObjectDetection} if is_torch_available() else {}
)
SCREAMING_SNAKE_CASE_ = False
SCREAMING_SNAKE_CASE_ = False
SCREAMING_SNAKE_CASE_ = False
SCREAMING_SNAKE_CASE_ = False
def __SCREAMING_SNAKE_CASE ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=False ) -> List[str]:
"""simple docstring"""
UpperCamelCase__ : int = super()._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , return_labels=__SCREAMING_SNAKE_CASE )
if return_labels:
if model_class.__name__ == "YolosForObjectDetection":
UpperCamelCase__ : List[Any] = []
for i in range(self.model_tester.batch_size ):
UpperCamelCase__ : Optional[int] = {}
UpperCamelCase__ : Union[str, Any] = torch.ones(
size=(self.model_tester.n_targets,) , device=__SCREAMING_SNAKE_CASE , dtype=torch.long )
UpperCamelCase__ : Tuple = torch.ones(
self.model_tester.n_targets , 4 , device=__SCREAMING_SNAKE_CASE , dtype=torch.float )
labels.append(__SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Optional[int] = labels
return inputs_dict
def __SCREAMING_SNAKE_CASE ( self ) -> int:
"""simple docstring"""
UpperCamelCase__ : Any = YolosModelTester(self )
UpperCamelCase__ : Union[str, Any] = ConfigTester(self , config_class=__SCREAMING_SNAKE_CASE , has_text_modality=__SCREAMING_SNAKE_CASE , hidden_size=3_7 )
def __SCREAMING_SNAKE_CASE ( self ) -> Tuple:
"""simple docstring"""
self.config_tester.run_common_tests()
def __SCREAMING_SNAKE_CASE ( self ) -> str:
"""simple docstring"""
pass
    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        # in YOLOS, the seq_len is different
        seq_len = self.model_tester.expected_seq_len
        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            self.assertListEqual(
                list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, seq_len, seq_len],
            )
            out_len = len(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            added_hidden_states = 1
            self.assertEqual(out_len + added_hidden_states, len(outputs))

            self_attentions = outputs.attentions
            self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, seq_len, seq_len],
            )
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states
            expected_num_layers = getattr(
                self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1)
            self.assertEqual(len(hidden_states), expected_num_layers)

            # YOLOS has a different seq_length
            seq_length = self.model_tester.expected_seq_len

            self.assertListEqual(
                list(hidden_states[0].shape[-2:]), [seq_length, self.model_tester.hidden_size],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)
    def test_for_object_detection(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_object_detection(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = YolosModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
@require_torch
@require_vision
class YolosModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained('hustvl/yolos-small') if is_vision_available() else None

    @slow
    def test_inference_object_detection_head(self):
        model = YolosForObjectDetection.from_pretrained('hustvl/yolos-small').to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='pt').to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(inputs.pixel_values)

        # verify outputs
        expected_shape = torch.Size((1, 100, 92))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice_logits = torch.tensor(
            [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]],
            device=torch_device,
        )
        expected_slice_boxes = torch.tensor(
            [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]], device=torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice_logits, atol=1e-4))
        self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3], expected_slice_boxes, atol=1e-4))

        # verify postprocessing
        results = image_processor.post_process_object_detection(
            outputs, threshold=0.3, target_sizes=[image.size[::-1]])[0]
        expected_scores = torch.tensor([0.9994, 0.9790, 0.9964, 0.9972, 0.9861]).to(torch_device)
        expected_labels = [75, 75, 17, 63, 17]
        expected_slice_boxes = torch.tensor([335.0609, 79.3848, 375.4216, 187.2495]).to(torch_device)

        self.assertEqual(len(results['scores']), 5)
        self.assertTrue(torch.allclose(results['scores'], expected_scores, atol=1e-4))
        self.assertSequenceEqual(results['labels'].tolist(), expected_labels)
        self.assertTrue(torch.allclose(results['boxes'][0, :], expected_slice_boxes))
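

# Standalone usage sketch (not part of the original test module): the checkpoint
# name and threshold are taken from the integration test above, the rest is a
# minimal assumption about how the pipeline is driven outside of unittest.
if __name__ == "__main__":
    model = YolosForObjectDetection.from_pretrained("hustvl/yolos-small")
    image_processor = AutoImageProcessor.from_pretrained("hustvl/yolos-small")
    image = prepare_img()
    inputs = image_processor(images=image, return_tensors="pt")
    with torch.no_grad():
        outputs = model(**inputs)
    results = image_processor.post_process_object_detection(
        outputs, threshold=0.3, target_sizes=[image.size[::-1]])[0]
    for score, label in zip(results["scores"], results["labels"]):
        print(model.config.id2label[label.item()], round(score.item(), 3))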
'''simple docstring'''
from __future__ import annotations
import numpy as np
from numpy import float64
from numpy.typing import NDArray


def jacobi_iteration_method(
    coefficient_matrix: NDArray[float64],
    constant_matrix: NDArray[float64],
    init_val: list[int],
    iterations: int,
) -> list[float]:
    """Solve Ax = b iteratively, starting from init_val, for a strictly
    diagonally dominant coefficient matrix."""
    rows1, cols1 = coefficient_matrix.shape
    rows2, cols2 = constant_matrix.shape

    if rows1 != cols1:
        msg = f"Coefficient matrix dimensions must be nxn but received {rows1}x{cols1}"
        raise ValueError(msg)

    if cols2 != 1:
        msg = f"Constant matrix must be nx1 but received {rows2}x{cols2}"
        raise ValueError(msg)

    if rows1 != rows2:
        msg = (
            "Coefficient and constant matrices dimensions must be nxn and nx1 but "
            f"received {rows1}x{cols1} and {rows2}x{cols2}"
        )
        raise ValueError(msg)

    if len(init_val) != rows1:
        msg = (
            "Number of initial values must be equal to number of rows in coefficient "
            f"matrix but received {len(init_val)} and {rows1}"
        )
        raise ValueError(msg)

    if iterations <= 0:
        raise ValueError("Iterations must be at least 1")

    table: NDArray[float64] = np.concatenate(
        (coefficient_matrix, constant_matrix), axis=1)

    rows, cols = table.shape

    strictly_diagonally_dominant(table)

    # Iterates the whole matrix for given number of times
    for _ in range(iterations):
        new_val = []
        for row in range(rows):
            temp = 0
            for col in range(cols):
                if col == row:
                    denom = table[row][col]
                elif col == cols - 1:
                    val = table[row][col]
                else:
                    temp += (-1) * table[row][col] * init_val[col]
            temp = (temp + val) / denom
            new_val.append(temp)
        init_val = new_val

    return [float(i) for i in new_val]


def strictly_diagonally_dominant(table: NDArray[float64]) -> bool:
    """Raise if any diagonal entry does not strictly dominate the rest of its row."""
    rows, cols = table.shape

    is_diagonally_dominant = True

    for i in range(0, rows):
        total = 0
        for j in range(0, cols - 1):
            if i == j:
                continue
            else:
                total += table[i][j]

        if table[i][i] <= total:
            raise ValueError("Coefficient matrix is not strictly diagonally dominant")

    return is_diagonally_dominant
# Test Cases
if __name__ == "__main__":
import doctest
doctest.testmod()
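
    # Quick usage sketch (the coefficients are assumptions, not part of the
    # original module): solve the strictly diagonally dominant system
    # 4x + y = 2, x + 5y = -6 with 25 Jacobi sweeps.
    coefficients = np.array([[4.0, 1.0], [1.0, 5.0]])
    constants = np.array([[2.0], [-6.0]])
    print(jacobi_iteration_method(coefficients, constants, [0, 0], 25))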
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class ConstraintTest(unittest.TestCase):
    def test_input_types(self):
        # For consistency across different places the DisjunctiveConstraint is called,
        # dc.token_ids is a list of integers. It is also initialized only by integers.
        cset = [[1, 2, 4], [1, 2, 3, 4]]
        dc = DisjunctiveConstraint(cset)
        self.assertTrue(isinstance(dc.token_ids, list))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]]))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint([torch.LongTensor([1, 2, 4]), torch.LongTensor([1, 2, 3, 4, 5])])

    def test_check_illegal_input(self):
        # We can't have constraints that are complete subsets of one another.
        cset = [[1, 2], [1, 2, 3, 4]]

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(cset)  # fails here

    def test_example_progression(self):
        ids = [[1, 2, 3], [1, 2, 4]]
        dc = DisjunctiveConstraint(ids)

        stepped, completed, reset = dc.update(1)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(3)
        desired = stepped is True and completed is True and reset is False
        self.assertTrue(desired)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 3])

    def test_example_progression_unequal_three_mid_and_reset(self):
        ids = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
        dc = DisjunctiveConstraint(ids)

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(4)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2, 4])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 4, 5])

        dc.reset()

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 3)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 2)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.remaining() == 0)
        self.assertTrue(dc.current_seq == [1, 2, 5])
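

# Minimal standalone sketch (the token ids are assumptions, not part of the
# original test module): drive a disjunctive constraint by hand, exactly as
# the tests above do.
if __name__ == "__main__" and is_torch_available():
    dc = DisjunctiveConstraint([[1, 2, 3], [1, 2, 4]])
    for token in (1, 2, 4):
        stepped, completed, reset = dc.update(token)
    print(dc.completed, dc.current_seq)  # True [1, 2, 4]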
"""simple docstring"""
import os
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE
class BaseCompressedfileFileSystem(AbstractArchiveFileSystem):
    """Read contents of a compressed file as a filesystem with one file inside."""

    root_marker = ""
    protocol: str = (
        None  # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
    )
    compression: str = None  # compression type in fsspec. ex: "gzip"
    extension: str = None  # extension of the filename to strip. ex: "".gz" to get file.txt from file.txt.gz

    def __init__(
        self, fo: str = "", target_protocol: Optional[str] = None, target_options: Optional[dict] = None, **kwargs
    ):
        super().__init__(self, **kwargs)
        # always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
        self.file = fsspec.open(
            fo,
            mode="rb",
            protocol=target_protocol,
            compression=self.compression,
            client_kwargs={
                "requote_redirect_url": False,  # see https://github.com/huggingface/datasets/pull/5459
                "trust_env": True,  # Enable reading proxy env variables.
                **(target_options or {}).pop("client_kwargs", {}),  # To avoid issues if it was already passed.
            },
            **(target_options or {}),
        )
        self.compressed_name = os.path.basename(self.file.path.split("::")[0])
        self.uncompressed_name = (
            self.compressed_name[: self.compressed_name.rindex(".")]
            if "." in self.compressed_name
            else self.compressed_name
        )
        self.dir_cache = None

    @classmethod
    def _strip_protocol(cls, path):
        # compressed file paths are always relative to the archive root
        return super()._strip_protocol(path).lstrip("/")

    def _get_dirs(self):
        if self.dir_cache is None:
            f = {**self.file.fs.info(self.file.path), "name": self.uncompressed_name}
            self.dir_cache = {f["name"]: f}

    def cat(self, path: str):
        return self.file.open().read()

    def _open(
        self,
        path: str,
        mode: str = "rb",
        block_size=None,
        autocommit=True,
        cache_options=None,
        **kwargs,
    ):
        path = self._strip_protocol(path)
        if mode != "rb":
            raise ValueError(f"Tried to read with mode {mode} on file {self.file.path} opened with mode 'rb'")
        return self.file.open()


class Bz2FileSystem(BaseCompressedfileFileSystem):
    protocol = "bz2"
    compression = "bz2"
    extension = ".bz2"


class GzipFileSystem(BaseCompressedfileFileSystem):
    protocol = "gzip"
    compression = "gzip"
    extension = ".gz"


class Lz4FileSystem(BaseCompressedfileFileSystem):
    protocol = "lz4"
    compression = "lz4"
    extension = ".lz4"


class XzFileSystem(BaseCompressedfileFileSystem):
    protocol = "xz"
    compression = "xz"
    extension = ".xz"


class ZstdFileSystem(BaseCompressedfileFileSystem):
    protocol = "zstd"
    compression = "zstd"
    extension = ".zst"

    def __init__(
        self,
        fo: str,
        mode: str = "rb",
        target_protocol: Optional[str] = None,
        target_options: Optional[dict] = None,
        block_size: int = DEFAULT_BLOCK_SIZE,
        **kwargs,
    ):
        super().__init__(
            fo=fo,
            mode=mode,
            target_protocol=target_protocol,
            target_options=target_options,
            block_size=block_size,
            **kwargs,
        )
        # We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
        #
        # File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
        #     out.close = close
        # AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
        #
        # see https://github.com/intake/filesystem_spec/issues/725
        _enter = self.file.__enter__

        class WrappedFile:
            def __init__(self, file_):
                self._file = file_

            def __enter__(self):
                self._file.__enter__()
                return self

            def __exit__(self, *args, **kwargs):
                self._file.__exit__(*args, **kwargs)

            def __iter__(self):
                return iter(self._file)

            def __next__(self):
                return next(self._file)

            def __getattr__(self, attr):
                return getattr(self._file, attr)

        def fixed_enter(*args, **kwargs):
            return WrappedFile(_enter(*args, **kwargs))

        self.file.__enter__ = fixed_enter
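

if __name__ == "__main__":
    # Usage sketch (a rough illustration, not part of the original module; the
    # temp-file setup is an assumption): expose a gzip archive as a single-file
    # filesystem and read its only member back.
    import gzip
    import tempfile

    with tempfile.NamedTemporaryFile(suffix=".txt.gz", delete=False) as tmp:
        tmp_path = tmp.name
    with gzip.open(tmp_path, "wb") as f:
        f.write(b"hello world")

    fs = GzipFileSystem(fo=tmp_path)
    print(fs.uncompressed_name)               # e.g. 'tmpXXXX.txt'
    with fs.open(fs.uncompressed_name) as f:  # always opened in "rb" mode
        print(f.read())                       # b'hello world'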
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SwiftFormerConfig,
SwiftFormerForImageClassification,
ViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
device = torch.device('cpu')


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


def get_expected_output(swiftformer_name):
    if swiftformer_name == "swiftformer_xs":
        return torch.tensor([-2.1703e00, 2.1107e00, -2.0811e00, 8.8685e-01, 2.4360e-01])
    elif swiftformer_name == "swiftformer_s":
        return torch.tensor([3.9636e-01, 2.3478e-01, -1.6963e00, -1.7381e00, -8.6337e-01])
    elif swiftformer_name == "swiftformer_l1":
        return torch.tensor([-4.2768e-01, -4.7429e-01, -1.0897e00, -1.0248e00, 3.5523e-02])
    elif swiftformer_name == "swiftformer_l3":
        return torch.tensor([-2.5330e-01, 2.4211e-01, -6.0185e-01, -8.2789e-01, -6.0446e-02])


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def create_rename_keys(state_dict):
    rename_keys = []
    for k in state_dict.keys():
        k_new = k
        if ".pwconv" in k:
            k_new = k_new.replace(".pwconv", ".point_wise_conv")
        if ".dwconv" in k:
            k_new = k_new.replace(".dwconv", ".depth_wise_conv")
        if ".Proj." in k:
            k_new = k_new.replace(".Proj.", ".proj.")
        if "patch_embed" in k_new:
            k_new = k_new.replace("patch_embed", "swiftformer.patch_embed.patch_embedding")
        if "network" in k_new:
            ls = k_new.split(".")
            if ls[2].isdigit():
                k_new = "swiftformer.encoder.network." + ls[1] + ".blocks." + ls[2] + "." + ".".join(ls[3:])
            else:
                k_new = k_new.replace("network", "swiftformer.encoder.network")
        rename_keys.append((k, k_new))
    return rename_keys


@torch.no_grad()
def convert_swiftformer_checkpoint(swiftformer_name, pytorch_dump_folder_path, original_ckpt):
    config = SwiftFormerConfig()

    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    # size of the architecture
    if swiftformer_name == "swiftformer_xs":
        config.depths = [3, 3, 6, 4]
        config.embed_dims = [48, 56, 112, 220]
    elif swiftformer_name == "swiftformer_s":
        config.depths = [3, 3, 9, 6]
        config.embed_dims = [48, 64, 168, 224]
    elif swiftformer_name == "swiftformer_l1":
        config.depths = [4, 3, 10, 5]
        config.embed_dims = [48, 96, 192, 384]
    elif swiftformer_name == "swiftformer_l3":
        config.depths = [4, 4, 12, 6]
        config.embed_dims = [64, 128, 320, 512]

    # load state_dict of original model, remove and rename some keys
    if original_ckpt:
        if original_ckpt.startswith("https"):
            checkpoint = torch.hub.load_state_dict_from_url(original_ckpt, map_location="cpu", check_hash=True)
        else:
            checkpoint = torch.load(original_ckpt, map_location="cpu")
    state_dict = checkpoint

    rename_keys = create_rename_keys(state_dict)
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict, rename_key_src, rename_key_dest)

    # load HuggingFace model
    hf_model = SwiftFormerForImageClassification(config).eval()
    hf_model.load_state_dict(state_dict)

    # prepare test inputs
    image = prepare_img()
    processor = ViTImageProcessor.from_pretrained("preprocessor_config")
    inputs = processor(images=image, return_tensors="pt")

    # compare outputs from both models
    timm_logits = get_expected_output(swiftformer_name)
    hf_logits = hf_model(inputs["pixel_values"]).logits

    assert hf_logits.shape == torch.Size([1, 1000])
    assert torch.allclose(hf_logits[0, 0:5], timm_logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"""Saving model {swiftformer_name} to {pytorch_dump_folder_path}""")
    hf_model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--swiftformer_name''',
default='''swiftformer_xs''',
choices=['''swiftformer_xs''', '''swiftformer_s''', '''swiftformer_l1''', '''swiftformer_l3'''],
type=str,
help='''Name of the SwiftFormer model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''./converted_outputs/''',
type=str,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument('''--original_ckpt''', default=None, type=str, help='''Path to the original model checkpoint.''')
    args = parser.parse_args()
convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
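
# Example invocation (the script filename and checkpoint path are assumptions;
# the flags and their defaults come from the argparse definition above):
#   python convert_swiftformer_original_to_hf.py \
#       --swiftformer_name swiftformer_xs \
#       --pytorch_dump_folder_path ./converted_outputs/ \
#       --original_ckpt ./swiftformer_xs.pth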
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"configuration_xlm": ["XLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLMConfig", "XLMOnnxConfig"],
"tokenization_xlm": ["XLMTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlm"] = [
"XLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"XLMForMultipleChoice",
"XLMForQuestionAnswering",
"XLMForQuestionAnsweringSimple",
"XLMForSequenceClassification",
"XLMForTokenClassification",
"XLMModel",
"XLMPreTrainedModel",
"XLMWithLMHeadModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xlm"] = [
"TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFXLMForMultipleChoice",
"TFXLMForQuestionAnsweringSimple",
"TFXLMForSequenceClassification",
"TFXLMForTokenClassification",
"TFXLMMainLayer",
"TFXLMModel",
"TFXLMPreTrainedModel",
"TFXLMWithLMHeadModel",
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
import argparse
import fairseq
import torch
from torch import nn
from transformers import (
MBartaaTokenizer,
MBartConfig,
MBartForCausalLM,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
TOP_LEVEL_KEYS = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    """Walk the attribute path in `key` and copy `value` into the target tensor."""
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
        f' {value.shape} for {full_name}'
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.')
def recursively_load_weights_wavaveca(fairseq_model, hf_model):
    """Map every fairseq encoder weight onto the corresponding HF module."""
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor
    adapter = hf_model.adapter

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        elif any(x in name for x in ["adaptor", "w2v_encoder.proj.", "w2v_proj_ln."]):
            load_adapter(name, value, adapter, unused_weights)
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f'Unused weights: {unused_weights}')
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """Copy one convolutional feature-extractor weight from the fairseq checkpoint."""
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f'{full_name} has size {value.shape}, but'
                f' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.')
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f'{full_name} has size {value.shape}, but'
                f' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.')
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f'{full_name} has size {value.shape}, but'
                f' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.'
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f'Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.')
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f'{full_name} has size {value.shape}, but'
                f' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.'
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.')
    else:
        unused_weights.append(full_name)
def load_adapter(full_name, value, adapter, unused_weights):
    """Copy one adapter weight (projection, layer norm, or conv layer) from the checkpoint."""
    name = full_name.split("adaptor.")[-1]
    items = name.split(".")

    if items[1].isdigit():
        layer_id = int(items[1])
    else:
        layer_id = None

    if "adaptor" not in full_name:
        if "proj_ln" in full_name:
            # has to be layer norm
            if "bias" in name:
                assert (
                    value.shape == adapter.proj_layer_norm.bias.data.shape
                ), f'{full_name} has size {value.shape}, but {adapter.proj_layer_norm.bias.data.shape} was found.'
                adapter.proj_layer_norm.bias.data = value
                logger.info(f'Adapter proj layer norm bias was initialized from {full_name}.')
            if "weight" in name:
                assert (
                    value.shape == adapter.proj_layer_norm.weight.data.shape
                ), f'{full_name} has size {value.shape}, but {adapter.proj_layer_norm.weight.data.shape} was found.'
                adapter.proj_layer_norm.weight.data = value
        else:
            # has to be projection layer
            if "bias" in name:
                assert (
                    value.shape == adapter.proj.bias.data.shape
                ), f'{full_name} has size {value.shape}, but {adapter.proj.bias.data.shape} was found.'
                adapter.proj.bias.data = value
                logger.info(f'Adapter proj layer bias was initialized from {full_name}.')
            if "weight" in name:
                assert (
                    value.shape == adapter.proj.weight.data.shape
                ), f'{full_name} has size {value.shape}, but {adapter.proj.weight.data.shape} was found.'
                adapter.proj.weight.data = value
                logger.info(f'Adapter proj layer weight was initialized from {full_name}.')
    elif isinstance(layer_id, int):
        if "bias" in name:
            assert (
                value.shape == adapter.layers[layer_id].conv.bias.data.shape
            ), f'{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.bias.data.shape} was found.'
            adapter.layers[layer_id].conv.bias.data = value
            logger.info(f'Adapter layer {layer_id} bias was initialized from {full_name}.')
        elif "weight" in name:
            assert (
                value.shape == adapter.layers[layer_id].conv.weight.data.shape
            ), f'{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.weight.data.shape} was found.'
            adapter.layers[layer_id].conv.weight.data = value
            logger.info(f'Adapter layer {layer_id} weight was initialized from {full_name}.')
    else:
        unused_weights.append(full_name)
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
@torch.no_grad()
def convert_wavaveca_checkpoint(
    checkpoint_path,
    pytorch_dump_folder_path,
    dict_path,
    config_yaml_path,
    encoder_config_path,
    decoder_config_path,
    add_adapter,
    adapter_kernel_size,
    adapter_stride,
    decoder_start_token_id,
    encoder_output_dim,
):
    """Copy/paste/tweak the fairseq wav2vec2 + mBART checkpoint into the transformers design."""
    # load configs
    encoder_config = WavaVecaConfig.from_pretrained(
        encoder_config_path,
        add_adapter=add_adapter,
        adapter_stride=adapter_stride,
        adapter_kernel_size=adapter_kernel_size,
        use_auth_token=True,
        output_hidden_size=encoder_output_dim,
    )
    decoder_config = MBartConfig.from_pretrained(decoder_config_path)

    # load model
    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path],
        arg_overrides={
            "config_yaml": config_yaml_path,
            "data": "/".join(dict_path.split("/")[:-1]),
            "w2v_path": checkpoint_path,
            "load_pretrained_decoder_from": None,
        },
    )
    model = model[0].eval()

    # load feature extractor
    feature_extractor = WavaVecaFeatureExtractor.from_pretrained(encoder_config_path, use_auth_token=True)

    # set weights for wav2vec2 encoder
    hf_encoder = WavaVecaModel(encoder_config)
    recursively_load_weights_wavaveca(model.encoder, hf_encoder)

    # load decoder weights
    hf_decoder = MBartForCausalLM(decoder_config)
    missing_keys, unexpected_keys = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict(), strict=False)
    logger.warning(f'The following keys are missing when loading the decoder weights: {missing_keys}')
    logger.warning(f'The following keys are unexpected when loading the decoder weights: {unexpected_keys}')

    hf_wavavec = SpeechEncoderDecoderModel(encoder=hf_encoder, decoder=hf_decoder)
    hf_wavavec.config.tie_word_embeddings = False

    tokenizer = MBartaaTokenizer(dict_path)
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    config = hf_wavavec.config.to_dict()
    config["pad_token_id"] = tokenizer.pad_token_id
    config["bos_token_id"] = tokenizer.bos_token_id
    config["eos_token_id"] = tokenizer.eos_token_id
    config["tokenizer_class"] = "mbart50"
    config["feature_extractor_type"] = "wav2vec2"

    config["decoder_start_token_id"] = tokenizer.eos_token_id
    config["forced_bos_token_id"] = 250004
    config["forced_eos_token_id"] = tokenizer.eos_token_id

    hf_wavavec.config = SpeechEncoderDecoderConfig.from_dict(config)

    hf_wavavec.save_pretrained(pytorch_dump_folder_path)
    feature_extractor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_yaml_path", default=None, type=str, help="Path to yaml file of fine-tuned model")
parser.add_argument(
"--encoder_config_path",
default="facebook/wav2vec2-xls-r-1b",
type=str,
help="Path to hf encoder wav2vec2 checkpoint config",
)
parser.add_argument(
"--decoder_config_path",
default="facebook/mbart-large-50-one-to-many-mmt",
type=str,
help="Path to hf decoder checkpoint config",
)
parser.add_argument("--add_adapter", default=True, type=bool, help="whethere to add model adapter layers")
parser.add_argument("--adapter_stride", default=2, type=int, help="stride of adapter layers")
parser.add_argument("--adapter_kernel_size", default=3, type=int, help="kernel size of adapter layers")
parser.add_argument("--encoder_output_dim", default=1_024, type=int, help="encoder output dim")
parser.add_argument("--start_token_id", default=250_004, type=int, help="`decoder_start_token_id` of model config")
    args = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
args.config_yaml_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
add_adapter=args.add_adapter,
adapter_kernel_size=args.adapter_kernel_size,
adapter_stride=args.adapter_stride,
decoder_start_token_id=args.start_token_id,
encoder_output_dim=args.encoder_output_dim,
)
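
# Example invocation (the script filename and all paths are assumptions; the
# flags come from the argparse definition above):
#   python convert_mbart_wav2vec2_seq2seq_original_to_pytorch.py \
#       --checkpoint_path ./xlsr_mbart50.pt \
#       --dict_path ./mbart50/dict.txt \
#       --config_yaml_path ./mbart50/config.yaml \
#       --pytorch_dump_folder_path ./wav2vec2-xls-r-mbart50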
import os
from shutil import copyfile
from typing import List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'sentencepiece.model'}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'google/rembert': 'https://huggingface.co/google/rembert/resolve/main/sentencepiece.model',
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'google/rembert': 256,
}
class RemBertTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=True,
        keep_accents=True,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs,
    ):
        super().__init__(
            do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        return len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text, sample=False):
        pieces = self.sp_model.EncodeAsPieces(text)
        return pieces

    def _convert_token_to_id(self, token):
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        out_string = self.sp_model.decode_pieces(tokens)
        return out_string

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model." )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
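

# Usage sketch (not part of the original module; the checkpoint name comes from
# PRETRAINED_VOCAB_FILES_MAP above, downloading it at runtime is an assumption):
if __name__ == "__main__":
    tokenizer = RemBertTokenizer.from_pretrained("google/rembert")
    ids = tokenizer.encode("Hello world")
    print(ids, tokenizer.convert_ids_to_tokens(ids))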
"""simple docstring"""
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def compare_string(string1: str, string2: str) -> str | Literal[False]:
    """Merge two minterm strings that differ in at most one position; the
    differing bit is replaced by '_'. Returns False when they differ in more
    than one position."""
    list1 = list(string1)
    list2 = list(string2)
    count = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count += 1
            list1[i] = "_"
    if count > 1:
        return False
    else:
        return "".join(list1)


def check(binary: list[str]) -> list[str]:
    """Repeatedly combine minterms until only the prime implicants remain."""
    pi = []
    while True:
        check1 = ["$"] * len(binary)
        temp = []
        for i in range(len(binary)):
            for j in range(i + 1, len(binary)):
                k = compare_string(binary[i], binary[j])
                if k is False:
                    check1[i] = "*"
                    check1[j] = "*"
                    temp.append("X")
        for i in range(len(binary)):
            if check1[i] == "$":
                pi.append(binary[i])
        if len(temp) == 0:
            return pi
        binary = list(set(temp))


def decimal_to_binary(no_of_variable: int, minterms: Sequence[float]) -> list[str]:
    """Convert each minterm to a fixed-width binary string."""
    temp = []
    for minterm in minterms:
        string = ""
        for _ in range(no_of_variable):
            string = str(minterm % 2) + string
            minterm //= 2
        temp.append(string)
    return temp


def is_for_table(string1: str, string2: str, count: int) -> bool:
    """Check whether an implicant covers a minterm, allowing `count` wildcards."""
    list1 = list(string1)
    list2 = list(string2)
    count_n = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count_n += 1
    return count_n == count


def selection(chart: list[list[int]], prime_implicants: list[str]) -> list[str]:
    """Pick the essential prime implicants from the coverage chart."""
    temp = []
    select = [0] * len(chart)
    # a column covered by exactly one implicant marks that implicant as essential
    for i in range(len(chart[0])):
        count = 0
        rem = -1
        for j in range(len(chart)):
            if chart[j][i] == 1:
                count += 1
                rem = j
        if count == 1:
            select[rem] = 1
    for i in range(len(select)):
        if select[i] == 1:
            for j in range(len(chart[0])):
                if chart[i][j] == 1:
                    for k in range(len(chart)):
                        chart[k][j] = 0
            temp.append(prime_implicants[i])
    # greedily add the implicant covering the most still-uncovered minterms
    while True:
        max_n = 0
        rem = -1
        count_n = 0
        for i in range(len(chart)):
            count_n = chart[i].count(1)
            if count_n > max_n:
                max_n = count_n
                rem = i
        if max_n == 0:
            return temp
        temp.append(prime_implicants[rem])
        for i in range(len(chart[0])):
            if chart[rem][i] == 1:
                for j in range(len(chart)):
                    chart[j][i] = 0


def prime_implicant_chart(prime_implicants: list[str], binary: list[str]) -> list[list[int]]:
    """Build the coverage chart: chart[i][j] == 1 iff implicant i covers minterm j."""
    chart = [[0 for x in range(len(binary))] for x in range(len(prime_implicants))]
    for i in range(len(prime_implicants)):
        count = prime_implicants[i].count("_")
        for j in range(len(binary)):
            if is_for_table(prime_implicants[i], binary[j], count):
                chart[i][j] = 1
    return chart


def main() -> None:
    no_of_variable = int(input("Enter the no. of variables\n"))
    minterms = [
        float(x)
        for x in input(
            "Enter the decimal representation of Minterms 'Spaces Separated'\n").split()
    ]
    binary = decimal_to_binary(no_of_variable, minterms)

    prime_implicants = check(binary)
    print("Prime Implicants are:")
    print(prime_implicants)
    chart = prime_implicant_chart(prime_implicants, binary)

    essential_prime_implicants = selection(chart, prime_implicants)
    print("Essential Prime Implicants are:")
    print(essential_prime_implicants)
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
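
    # Non-interactive sketch (the minterms are assumptions, not part of the
    # original script): 3-variable minterms 1, 5 and 7 in binary form, ready
    # to be fed to check().
    print(decimal_to_binary(3, [1, 5, 7]))  # ['001', '101', '111']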
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'sayakpaul/vit-msn-base': 'https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json',
    # See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}
class ViTMSNConfig(PretrainedConfig):
    model_type = "vit_msn"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
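

# Minimal usage sketch (not part of the original module): build a default
# configuration and override just the patch size; all other hyper-parameters
# keep the defaults defined above.
if __name__ == "__main__":
    config = ViTMSNConfig(patch_size=8)
    print(config.patch_size, config.hidden_size)  # 8 768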
'''simple docstring'''
def present_value(discount_rate: float, cash_flows: list[float]) -> float:
    """Discount each cash flow by its period and sum, rounded to 2 decimals."""
    if discount_rate < 0:
        raise ValueError('Discount rate cannot be negative')
    if not cash_flows:
        raise ValueError('Cash flows list cannot be empty')
    present_value = sum(
        cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(cash_flows))
    return round(present_value, ndigits=2)
if __name__ == "__main__":
import doctest
doctest.testmod()
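
    # Worked example (the figures are assumptions, not part of the original
    # module): a 1000 outlay followed by three annual inflows of 400,
    # discounted at 10% per period, nets out to roughly -5.26.
    print(present_value(0.10, [-1000.0, 400.0, 400.0, 400.0]))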
'''simple docstring'''
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class FlaxControlNetPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up memory after each test
        super().tearDown()
        gc.collect()

    def test_canny(self):
        controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
            "lllyasviel/sd-controlnet-canny", from_pt=True, dtype=jnp.bfloat16)
        pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", controlnet=controlnet, from_pt=True, dtype=jnp.bfloat16)
        params["controlnet"] = controlnet_params

        prompts = 'bird'
        num_samples = jax.device_count()
        prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples)

        canny_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png")
        processed_image = pipe.prepare_image_inputs([canny_image] * num_samples)

        rng = jax.random.PRNGKey(0)
        rng = jax.random.split(rng, jax.device_count())

        p_params = replicate(params)
        prompt_ids = shard(prompt_ids)
        processed_image = shard(processed_image)

        images = pipe(
            prompt_ids=prompt_ids, image=processed_image, params=p_params, prng_seed=rng, num_inference_steps=50, jit=True,
        ).images
        assert images.shape == (jax.device_count(), 1, 768, 512, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [0.167969, 0.116699, 0.081543, 0.154297, 0.132812, 0.108887, 0.169922, 0.169922, 0.205078])
        print(f'''output_slice: {output_slice}''')
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2

    def test_pose(self):
        controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
            "lllyasviel/sd-controlnet-openpose", from_pt=True, dtype=jnp.bfloat16)
        pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", controlnet=controlnet, from_pt=True, dtype=jnp.bfloat16)
        params["controlnet"] = controlnet_params

        prompts = 'Chef in the kitchen'
        num_samples = jax.device_count()
        prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples)

        pose_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png")
        processed_image = pipe.prepare_image_inputs([pose_image] * num_samples)

        rng = jax.random.PRNGKey(0)
        rng = jax.random.split(rng, jax.device_count())

        p_params = replicate(params)
        prompt_ids = shard(prompt_ids)
        processed_image = shard(processed_image)

        images = pipe(
            prompt_ids=prompt_ids, image=processed_image, params=p_params, prng_seed=rng, num_inference_steps=50, jit=True,
        ).images
        assert images.shape == (jax.device_count(), 1, 768, 512, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [[0.271484, 0.261719, 0.275391, 0.277344, 0.279297, 0.291016, 0.294922, 0.302734, 0.302734]])
        print(f'''output_slice: {output_slice}''')
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
"""simple docstring"""
from typing import Optional, Union
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin
class StableUnCLIPImageNormalizer(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(self, embedding_dim: int = 768,):
        super().__init__()

        self.mean = nn.Parameter(torch.zeros(1, embedding_dim))
        self.std = nn.Parameter(torch.ones(1, embedding_dim))

    def to(
        self,
        torch_device: Optional[Union[str, torch.device]] = None,
        torch_dtype: Optional[torch.dtype] = None,
    ):
        self.mean = nn.Parameter(self.mean.to(torch_device).to(torch_dtype))
        self.std = nn.Parameter(self.std.to(torch_device).to(torch_dtype))
        return self

    def scale(self, embeds):
        embeds = (embeds - self.mean) * 1.0 / self.std
        return embeds

    def unscale(self, embeds):
        embeds = (embeds * self.std) + self.mean
        return embeds
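

# Minimal usage sketch (not part of the original module; shapes are
# assumptions): scale() standardizes embeddings and unscale() inverts it, so
# the round trip returns the input.
if __name__ == "__main__":
    normalizer = StableUnCLIPImageNormalizer(embedding_dim=4)
    x = torch.randn(2, 4)
    assert torch.allclose(normalizer.unscale(normalizer.scale(x)), x, atol=1e-6)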
"""simple docstring"""
import hashlib
import unittest
from typing import Dict
import numpy as np
from transformers import (
MODEL_FOR_MASK_GENERATION_MAPPING,
TF_MODEL_FOR_MASK_GENERATION_MAPPING,
is_vision_available,
pipeline,
)
from transformers.pipelines import MaskGenerationPipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass


def hashimage(image: Image) -> str:
    m = hashlib.md5(image.tobytes())
    return m.hexdigest()[:10]


def mask_to_test_readable(mask: Image) -> Dict:
    npimg = np.array(mask)
    shape = npimg.shape
    return {"hash": hashimage(mask), "shape": shape}
@is_pipeline_test
@require_vision
@require_torch
class MaskGenerationPipelineTests(unittest.TestCase):
    model_mapping = dict(
        (list(MODEL_FOR_MASK_GENERATION_MAPPING.items()) if MODEL_FOR_MASK_GENERATION_MAPPING else []))
    tf_model_mapping = dict(
        (list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items()) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else []))

    def get_test_pipeline(self, model, tokenizer, processor):
        image_segmenter = MaskGenerationPipeline(model=model, image_processor=processor)
        return image_segmenter, [
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
        ]

    def run_pipeline_test(self, mask_generator, examples):
        pass

    @require_tf
    @unittest.skip("Image segmentation not implemented in TF" )
    def test_small_model_tf(self):
        pass
    @slow
    @require_torch
    def test_small_model_pt(self):
        image_segmenter = pipeline("mask-generation", model="facebook/sam-vit-huge")

        outputs = image_segmenter("http://images.cocodataset.org/val2017/000000039769.jpg", points_per_batch=256)

        # Shortening by hashing
        new_output = []
        for i, o in enumerate(outputs["masks"]):
            new_output += [{"mask": mask_to_test_readable(o), "scores": outputs["scores"][i]}]

        # fmt: off
        self.assertEqual(
            nested_simplify(new_output, decimals=4), [
{"mask": {"hash": "115ad19f5f", "shape": (4_8_0, 6_4_0)}, "scores": 1.0_444},
{"mask": {"hash": "6affa964c6", "shape": (4_8_0, 6_4_0)}, "scores": 1.021},
{"mask": {"hash": "dfe28a0388", "shape": (4_8_0, 6_4_0)}, "scores": 1.0_167},
{"mask": {"hash": "c0a5f4a318", "shape": (4_8_0, 6_4_0)}, "scores": 1.0_132},
{"mask": {"hash": "fe8065c197", "shape": (4_8_0, 6_4_0)}, "scores": 1.0_053},
{"mask": {"hash": "e2d0b7a0b7", "shape": (4_8_0, 6_4_0)}, "scores": 0.9_967},
{"mask": {"hash": "453c7844bd", "shape": (4_8_0, 6_4_0)}, "scores": 0.993},
{"mask": {"hash": "3d44f2926d", "shape": (4_8_0, 6_4_0)}, "scores": 0.9_909},
{"mask": {"hash": "64033ddc3f", "shape": (4_8_0, 6_4_0)}, "scores": 0.9_879},
{"mask": {"hash": "801064ff79", "shape": (4_8_0, 6_4_0)}, "scores": 0.9_834},
{"mask": {"hash": "6172f276ef", "shape": (4_8_0, 6_4_0)}, "scores": 0.9_716},
{"mask": {"hash": "b49e60e084", "shape": (4_8_0, 6_4_0)}, "scores": 0.9_612},
{"mask": {"hash": "a811e775fd", "shape": (4_8_0, 6_4_0)}, "scores": 0.9_599},
{"mask": {"hash": "a6a8ebcf4b", "shape": (4_8_0, 6_4_0)}, "scores": 0.9_552},
{"mask": {"hash": "9d8257e080", "shape": (4_8_0, 6_4_0)}, "scores": 0.9_532},
{"mask": {"hash": "32de6454a8", "shape": (4_8_0, 6_4_0)}, "scores": 0.9_516},
{"mask": {"hash": "af3d4af2c8", "shape": (4_8_0, 6_4_0)}, "scores": 0.9_499},
{"mask": {"hash": "3c6db475fb", "shape": (4_8_0, 6_4_0)}, "scores": 0.9_483},
{"mask": {"hash": "c290813fb9", "shape": (4_8_0, 6_4_0)}, "scores": 0.9_464},
{"mask": {"hash": "b6f0b8f606", "shape": (4_8_0, 6_4_0)}, "scores": 0.943},
{"mask": {"hash": "92ce16bfdf", "shape": (4_8_0, 6_4_0)}, "scores": 0.943},
{"mask": {"hash": "c749b25868", "shape": (4_8_0, 6_4_0)}, "scores": 0.9_408},
{"mask": {"hash": "efb6cab859", "shape": (4_8_0, 6_4_0)}, "scores": 0.9_335},
{"mask": {"hash": "1ff2eafb30", "shape": (4_8_0, 6_4_0)}, "scores": 0.9_326},
{"mask": {"hash": "788b798e24", "shape": (4_8_0, 6_4_0)}, "scores": 0.9_262},
{"mask": {"hash": "abea804f0e", "shape": (4_8_0, 6_4_0)}, "scores": 0.8_999},
{"mask": {"hash": "7b9e8ddb73", "shape": (4_8_0, 6_4_0)}, "scores": 0.8_986},
{"mask": {"hash": "cd24047c8a", "shape": (4_8_0, 6_4_0)}, "scores": 0.8_984},
{"mask": {"hash": "6943e6bcbd", "shape": (4_8_0, 6_4_0)}, "scores": 0.8_873},
{"mask": {"hash": "b5f47c9191", "shape": (4_8_0, 6_4_0)}, "scores": 0.8_871}
] , )
# fmt: on
@require_torch
@slow
def lowerCamelCase__ ( self :Optional[int] ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase__ = "facebook/sam-vit-huge"
UpperCamelCase__ = pipeline("mask-generation" , model=__a )
UpperCamelCase__ = image_segmenter(
"http://images.cocodataset.org/val2017/000000039769.jpg" , pred_iou_thresh=1 , points_per_batch=2_5_6 )
# Shortening by hashing
UpperCamelCase__ = []
for i, o in enumerate(outputs["masks"] ):
new_outupt += [{"mask": mask_to_test_readable(__a ), "scores": outputs["scores"][i]}]
self.assertEqual(
            nested_simplify(new_output , decimals=4 ) , [
{"mask": {"hash": "115ad19f5f", "shape": (4_8_0, 6_4_0)}, "scores": 1.0_444},
{"mask": {"hash": "6affa964c6", "shape": (4_8_0, 6_4_0)}, "scores": 1.0_210},
{"mask": {"hash": "dfe28a0388", "shape": (4_8_0, 6_4_0)}, "scores": 1.0_167},
{"mask": {"hash": "c0a5f4a318", "shape": (4_8_0, 6_4_0)}, "scores": 1.0_132},
{"mask": {"hash": "fe8065c197", "shape": (4_8_0, 6_4_0)}, "scores": 1.0_053},
] , )
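# `mask_to_test_readable` is referenced above but not defined in this excerpt; a minimal
# sketch of what it plausibly does -- hashing each mask so the large boolean arrays
# compare as short strings (helper name and digest length are assumptions):
import hashlib

import numpy as np


def mask_to_test_readable(mask) -> dict:
    npimg = np.array(mask)
    # ten hex chars are enough to tell the masks in the expectations above apart
    return {"hash": hashlib.md5(npimg.tobytes()).hexdigest()[:10], "shape": npimg.shape}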
| 707
|
"""simple docstring"""
import argparse
from argparse import Namespace
import torch
from torch import nn
from transformers import XGLMConfig, XGLMForCausalLM
def remove_ignore_keys_(state_dict):
    """simple docstring"""
    ignore_keys = [
        "decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    """simple docstring"""
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_xglm_checkpoint_from_disk(checkpoint_path):
    """simple docstring"""
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    args = Namespace(**checkpoint["cfg"]["model"])
    state_dict = checkpoint["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["decoder.embed_tokens.weight"].shape[0]
    state_dict = {key.replace("decoder", "model"): val for key, val in state_dict.items()}
    config = XGLMConfig(
        vocab_size=vocab_size, max_position_embeddings=args.max_target_positions, num_layers=args.decoder_layers, attention_heads=args.decoder_attention_heads, ffn_dim=args.decoder_ffn_embed_dim, d_model=args.decoder_embed_dim, layerdrop=args.decoder_layerdrop, dropout=args.dropout, attention_dropout=args.attention_dropout, activation_dropout=args.activation_dropout, activation_function="gelu", scale_embedding=not args.no_scale_embedding, tie_word_embeddings=args.share_decoder_input_output_embed, )
    model = XGLMForCausalLM(config)
    missing = model.load_state_dict(state_dict, strict=False)
    print(missing)
    model.lm_head = make_linear_from_emb(model.model.embed_tokens)
    return model
if __name__ == "__main__":
A : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument('fairseq_path', type=str, help='path to a model.pt on local filesystem.')
parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
A : Dict = parser.parse_args()
A : Any = convert_fairseq_xglm_checkpoint_from_disk(args.fairseq_path)
model.save_pretrained(args.pytorch_dump_folder_path)
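# After conversion the dump reloads like any other checkpoint; a minimal sketch
# (the folder name is whatever was passed as `pytorch_dump_folder_path`):
#
#     reloaded = XGLMForCausalLM.from_pretrained(args.pytorch_dump_folder_path)
#     reloaded.eval()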
| 304
| 0
|
import pytest
import requests
from datasets.utils.file_utils import http_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline
@pytest.mark.integration
def test_offline_with_timeout():
    with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT):
        with pytest.raises(RequestWouldHangIndefinitelyError):
            requests.request('GET' , 'https://huggingface.co')
        with pytest.raises(requests.exceptions.ConnectTimeout):
            requests.request('GET' , 'https://huggingface.co' , timeout=1.0)
@pytest.mark.integration
def test_offline_with_connection_error():
    with offline(OfflineSimulationMode.CONNECTION_FAILS):
        with pytest.raises(requests.exceptions.ConnectionError):
            requests.request('GET' , 'https://huggingface.co')
def test_offline_with_datasets_offline_mode():
    with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1):
        with pytest.raises(ConnectionError):
            http_head('https://huggingface.co')
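# Outside of tests, the third simulation mode corresponds to simply exporting the
# variable before importing `datasets`:
#
#     import os
#     os.environ["HF_DATASETS_OFFLINE"] = "1"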
| 23
|
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE__ : Any = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ : Any = 'https://openaipublic.azureedge.net/jukebox/models/'
SCREAMING_SNAKE_CASE__ : Union[str, Any] = {
'jukebox-1b-lyrics': [
'5b/vqvae.pth.tar',
'5b/prior_level_0.pth.tar',
'5b/prior_level_1.pth.tar',
'1b_lyrics/prior_level_2.pth.tar',
],
'jukebox-5b-lyrics': [
'5b/vqvae.pth.tar',
'5b/prior_level_0.pth.tar',
'5b/prior_level_1.pth.tar',
'5b_lyrics/prior_level_2.pth.tar',
],
}
def replace_key(key):
    if key.endswith(".model.1.bias") and len(key.split(".")) > 10:
        key = key.replace(".model.1.bias", ".conv1d_1.bias")
    elif key.endswith(".model.1.weight") and len(key.split(".")) > 10:
        key = key.replace(".model.1.weight", ".conv1d_1.weight")
    elif key.endswith(".model.3.bias") and len(key.split(".")) > 10:
        key = key.replace(".model.3.bias", ".conv1d_2.bias")
    elif key.endswith(".model.3.weight") and len(key.split(".")) > 10:
        key = key.replace(".model.3.weight", ".conv1d_2.weight")

    if "conditioner_blocks.0." in key:
        key = key.replace("conditioner_blocks.0", "conditioner_blocks")

    if "prime_prior" in key:
        key = key.replace("prime_prior", "encoder")

    if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
        key = key.replace(".emb.", ".")

    if key.endswith("k"):  # replace vqvae.X.k with vqvae.X.codebook
        return key.replace(".k", ".codebook")
    if "y_emb." in key:
        return key.replace("y_emb.", "metadata_embedding.")
    if "x_emb.emb." in key:
        key = key.replace("0.x_emb.emb", "embed_tokens")
    if "prime_state_ln" in key:
        return key.replace("prime_state_ln", "encoder.final_layer_norm")
    if ".ln" in key:
        return key.replace(".ln", ".layer_norm")
    if "_ln" in key:
        return key.replace("_ln", "_layer_norm")
    if "prime_state_proj" in key:
        return key.replace("prime_state_proj", "encoder.proj_in")
    if "prime_x_out" in key:
        return key.replace("prime_x_out", "encoder.lm_head")
    if "prior.x_out" in key:
        return key.replace("x_out", "fc_proj_out")
    if "x_emb" in key:
        return key.replace("x_emb", "embed_tokens")
    return key
def fix_jukebox_keys(state_dict, model_state_dict, key_prefix, mapping):
    new_dict = {}
    import re

    re_encoder_block_conv_in = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)")
    re_encoder_block_resnet = re.compile(
        r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)")
    re_encoder_block_proj_out = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)")

    re_decoder_block_conv_out = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)")
    re_decoder_block_resnet = re.compile(
        r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)")
    re_decoder_block_proj_in = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)")

    re_prior_cond_conv_out = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)")
    re_prior_cond_resnet = re.compile(
        r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)")
    re_prior_cond_proj_in = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)")

    for original_key, value in state_dict.items():
        # rename vqvae.encoder keys
        if re_encoder_block_conv_in.fullmatch(original_key):
            regex_match = re_encoder_block_conv_in.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3])
            re_new_key = f"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}"
            key = re_encoder_block_conv_in.sub(re_new_key, original_key)

        elif re_encoder_block_resnet.fullmatch(original_key):
            regex_match = re_encoder_block_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3])
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_encoder_block_resnet.sub(re_new_key, original_key)

        elif re_encoder_block_proj_out.fullmatch(original_key):
            regex_match = re_encoder_block_proj_out.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}"
            key = re_encoder_block_proj_out.sub(re_new_key, original_key)

        # rename vqvae.decoder keys
        elif re_decoder_block_conv_out.fullmatch(original_key):
            regex_match = re_decoder_block_conv_out.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3]) - 2
            re_new_key = f"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}"
            key = re_decoder_block_conv_out.sub(re_new_key, original_key)

        elif re_decoder_block_resnet.fullmatch(original_key):
            regex_match = re_decoder_block_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3]) - 2
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_decoder_block_resnet.sub(re_new_key, original_key)

        elif re_decoder_block_proj_in.fullmatch(original_key):
            regex_match = re_decoder_block_proj_in.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}"
            key = re_decoder_block_proj_in.sub(re_new_key, original_key)

        # rename prior cond.model to upsampler.upsample_block and resnet
        elif re_prior_cond_conv_out.fullmatch(original_key):
            regex_match = re_prior_cond_conv_out.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[1]) * 2 + int(groups[2]) - 2
            re_new_key = f"conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}"
            key = re_prior_cond_conv_out.sub(re_new_key, original_key)

        elif re_prior_cond_resnet.fullmatch(original_key):
            regex_match = re_prior_cond_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[1]) * 2 + int(groups[2]) - 2
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"conditioner_blocks.upsampler.upsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_prior_cond_resnet.sub(re_new_key, original_key)

        elif re_prior_cond_proj_in.fullmatch(original_key):
            regex_match = re_prior_cond_proj_in.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"conditioner_blocks.upsampler.proj_in.{groups[-1]}"
            key = re_prior_cond_proj_in.sub(re_new_key, original_key)

        # keep original key
        else:
            key = original_key

        key = replace_key(key)

        if f"{key_prefix}.{key}" not in model_state_dict or key is None:
            print(f"failed converting {original_key} to {key}, does not match")

        # handle mismatched shape
        elif value.shape != model_state_dict[f"{key_prefix}.{key}"].shape:
            val = model_state_dict[f"{key_prefix}.{key}"]
            print(f"{original_key}-> {key} : \nshape {val.shape} and { value.shape}, do not match")
            key = original_key

        mapping[key] = original_key
        new_dict[key] = value

    return new_dict
@torch.no_grad()
def convert_openai_checkpoint(model_name=None, pytorch_dump_folder_path=None):
    for file in MODEL_MAPPING[model_name]:
        if not os.path.isfile(f"{pytorch_dump_folder_path}/{file.split('/')[-1]}"):
            r = requests.get(f"{PREFIX}{file}", allow_redirects=True)
            os.makedirs(f"{pytorch_dump_folder_path}/", exist_ok=True)
            open(f"{pytorch_dump_folder_path}/{file.split('/')[-1]}", "wb").write(r.content)
    model_to_convert = MODEL_MAPPING[model_name.split("/")[-1]]

    config = JukeboxConfig.from_pretrained(model_name)
    model = JukeboxModel(config)

    weight_dict = []
    mapping = {}
    for i, dict_name in enumerate(model_to_convert):
        old_dic = torch.load(f"{pytorch_dump_folder_path}/{dict_name.split('/')[-1]}")["model"]

        new_dic = {}
        for k in old_dic.keys():
            if k.endswith(".b"):
                new_dic[k.replace("b", "bias")] = old_dic[k]
            elif k.endswith(".w"):
                new_dic[k.replace("w", "weight")] = old_dic[k]
            elif "level_2" not in dict_name and "cond.model." in k:
                new_dic[k.replace(".blocks.", ".model.")] = old_dic[k]
            else:
                new_dic[k] = old_dic[k]

        key_prefix = "vqvae" if i == 0 else f"priors.{3 - i}"
        new_dic = fix_jukebox_keys(new_dic, model.state_dict(), key_prefix, mapping)
        weight_dict.append(new_dic)

    vqvae_state_dict = weight_dict.pop(0)
    model.vqvae.load_state_dict(vqvae_state_dict)
    for i in range(len(weight_dict)):
        model.priors[i].load_state_dict(weight_dict[2 - i])

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    with open(f"{pytorch_dump_folder_path}/mapping.json", "w") as txtfile:
        json.dump(mapping, txtfile)

    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    return weight_dict
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='jukebox-5b-lyrics',
type=str,
help='Name of the model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='jukebox-5b-lyrics-converted',
type=str,
help='Path to the output PyTorch model directory.',
)
SCREAMING_SNAKE_CASE__ : List[Any] = parser.parse_args()
convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
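    # Two renames grounded in the branches of `replace_key` above, as a quick sanity check:
    assert replace_key("y_emb.lstm.weight") == "metadata_embedding.lstm.weight"
    assert replace_key("prior.x_out.weight") == "prior.fc_proj_out.weight"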
| 643
| 0
|
'''simple docstring'''
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel
from diffusers import DDIMScheduler, LDMPipeline, UNet2DModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class LDMPipelineFastTests(unittest.TestCase):
    """simple docstring"""
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=3, out_channels=3, down_block_types=("DownBlock2D", "AttnDownBlock2D"), up_block_types=("AttnUpBlock2D", "UpBlock2D"), )
        return model

    @property
    def dummy_vq_model(self):
        torch.manual_seed(0)
        model = VQModel(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=3, )
        return model

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, )
        return CLIPTextModel(config)
    def test_inference_uncond(self):
        unet = self.dummy_uncond_unet
        scheduler = DDIMScheduler()
        vae = self.dummy_vq_model

        ldm = LDMPipeline(unet=unet, vqvae=vae, scheduler=scheduler)
        ldm.to(torch_device)
        ldm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ldm(generator=generator, num_inference_steps=2, output_type="numpy").images

        generator = torch.manual_seed(0)
        image_from_tuple = ldm(generator=generator, num_inference_steps=2, output_type="numpy", return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.8512, 0.818, 0.6411, 0.6808, 0.4465, 0.5618, 0.46, 0.6231, 0.5172])
        tolerance = 1e-2 if torch_device != "mps" else 3e-2

        assert np.abs(image_slice.flatten() - expected_slice).max() < tolerance
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < tolerance
@slow
@require_torch
class LDMPipelineIntegrationTests(unittest.TestCase):
    """simple docstring"""
    def test_inference_uncond(self):
        ldm = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256")
        ldm.to(torch_device)
        ldm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ldm(generator=generator, num_inference_steps=5, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.4399, 0.44975, 0.46825, 0.474, 0.4359, 0.4581, 0.45095, 0.4341, 0.4447])
        tolerance = 1e-2 if torch_device != "mps" else 3e-2

        assert np.abs(image_slice.flatten() - expected_slice).max() < tolerance
| 543
|
'''simple docstring'''
from __future__ import annotations
import os
from typing import Any
import requests
UpperCamelCase ="https://api.github.com"
# https://docs.github.com/en/free-pro-team@latest/rest/reference/users#get-the-authenticated-user
UpperCamelCase =BASE_URL + "/user"
# https://github.com/settings/tokens
UpperCamelCase =os.environ.get("USER_TOKEN", "")
def snake_case ( a_ : str ) -> dict[Any, Any]:
"""simple docstring"""
UpperCamelCase_ : Tuple = {
"""Authorization""": f"token {auth_token}",
"""Accept""": """application/vnd.github.v3+json""",
}
return requests.get(a_ , headers=a_ ).json()
if __name__ == "__main__": # pragma: no cover
if USER_TOKEN:
for key, value in fetch_github_info(USER_TOKEN).items():
print(f"{key}: {value}")
else:
raise ValueError("'USER_TOKEN' field cannot be empty.")
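# The returned dict is the raw `/user` payload; a short sketch of pulling a couple of
# its standard fields (field names per the GitHub REST docs):
#
#     info = fetch_github_info(USER_TOKEN)
#     print(info.get("login"), info.get("public_repos"))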
| 543
| 1
|
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
pkgs_to_check_at_runtime = [
"python",
"tqdm",
"regex",
"requests",
"packaging",
"filelock",
"numpy",
"tokenizers",
"huggingface-hub",
"safetensors",
"accelerate",
"pyyaml",
]
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
elif pkg == "accelerate":
# must be loaded here, or else tqdm check may fail
from .utils import is_accelerate_available
# Maybe switch to is_torch_available in the future here so that Accelerate is hard dep of
# Transformers with PyTorch
if not is_accelerate_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(F"""can't find {pkg} in {deps.keys()}, check dependency_versions_table.py""")
def dep_version_check(pkg: str, hint=None):
    require_version(deps[pkg], hint)
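# Typical call sites elsewhere in the library look like:
#
#     dep_version_check("tqdm")  # raises if the installed tqdm violates the pinned range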
| 327
|
import os
import tempfile
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from torch import nn
from transformers import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_inverse_sqrt_schedule,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
def unwrap_schedule(scheduler, num_steps=10):
    lrs = []
    for _ in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
    return lrs


def unwrap_and_save_reload_schedule(scheduler, num_steps=10):
    lrs = []
    for step in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
        if step == num_steps // 2:
            with tempfile.TemporaryDirectory() as tmpdirname:
                file_name = os.path.join(tmpdirname, "schedule.bin")
                torch.save(scheduler.state_dict(), file_name)

                state_dict = torch.load(file_name)
                scheduler.load_state_dict(state_dict)
    return lrs
@require_torch
class OptimizationTest(unittest.TestCase):
    """simple docstring"""
    def assertListAlmostEqual(self, list1, list2, tol):
        '''simple docstring'''
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol)

    def test_adam_w(self):
        '''simple docstring'''
        w = torch.tensor([0.1, -0.2, -0.1], requires_grad=True)
        target = torch.tensor([0.4, 0.2, -0.5])
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = AdamW(params=[w], lr=2e-1, weight_decay=0.0)
        for _ in range(100):
            loss = criterion(w, target)
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist(), [0.4, 0.2, -0.5], tol=1e-2)

    def test_adafactor(self):
        '''simple docstring'''
        w = torch.tensor([0.1, -0.2, -0.1], requires_grad=True)
        target = torch.tensor([0.4, 0.2, -0.5])
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = Adafactor(
            params=[w], lr=1e-2, eps=(1e-30, 1e-3), clip_threshold=1.0, decay_rate=-0.8, beta1=None, weight_decay=0.0, relative_step=False, scale_parameter=False, warmup_init=False, )
        for _ in range(1_000):
            loss = criterion(w, target)
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist(), [0.4, 0.2, -0.5], tol=1e-2)
@require_torch
class ScheduleInitTest(unittest.TestCase):
    """simple docstring"""
    m = nn.Linear(50, 50) if is_torch_available() else None
    optimizer = AdamW(m.parameters(), lr=10.0) if is_torch_available() else None
    num_steps = 10

    def assertListAlmostEqual(self, list1, list2, tol, msg=None):
        '''simple docstring'''
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol, msg=msg)

    def test_schedulers(self):
        '''simple docstring'''
        common_kwargs = {"num_warmup_steps": 2, "num_training_steps": 10}
        # schedulers doct format
        # function: (sched_args_dict, expected_learning_rates)
        scheds = {
get_constant_schedule: ({}, [10.0] * self.num_steps),
get_constant_schedule_with_warmup: (
{"num_warmup_steps": 4},
[0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
),
get_linear_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
),
get_cosine_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
),
get_cosine_with_hard_restarts_schedule_with_warmup: (
{**common_kwargs, "num_cycles": 2},
[0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
),
get_polynomial_decay_schedule_with_warmup: (
{**common_kwargs, "power": 2.0, "lr_end": 1E-7},
[0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156],
),
get_inverse_sqrt_schedule: (
{"num_warmup_steps": 2},
[0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714],
),
}
        for scheduler_func, data in scheds.items():
            kwargs, expected_learning_rates = data

            scheduler = scheduler_func(self.optimizer, **kwargs)
            self.assertEqual(len([scheduler.get_lr()[0]]), 1)
            lrs_1 = unwrap_schedule(scheduler, self.num_steps)
            self.assertListAlmostEqual(
                lrs_1, expected_learning_rates, tol=1e-2, msg=F'''failed for {scheduler_func} in normal scheduler''', )

            scheduler = scheduler_func(self.optimizer, **kwargs)
            if scheduler_func.__name__ != "get_constant_schedule":
                LambdaScheduleWrapper.wrap_scheduler(scheduler)  # wrap to test picklability of the schedule
            lrs_2 = unwrap_and_save_reload_schedule(scheduler, self.num_steps)
            self.assertListEqual(lrs_1, lrs_2, msg=F'''failed for {scheduler_func} in save and reload''')


class LambdaScheduleWrapper:
    """simple docstring"""
    def __init__(self, fn):
        '''simple docstring'''
        self.fn = fn

    def __call__(self, *args, **kwargs):
        '''simple docstring'''
        return self.fn(*args, **kwargs)

    @classmethod
    def wrap_scheduler(cls, scheduler):
        '''simple docstring'''
        scheduler.lr_lambdas = list(map(cls, scheduler.lr_lambdas))
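# A self-contained sketch of the warmup-scheduler pattern the tests above exercise
# (guarded so importing the test module stays side-effect free):
if __name__ == "__main__":
    model = nn.Linear(50, 50)
    optimizer = AdamW(model.parameters(), lr=10.0)
    scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=2, num_training_steps=10)
    for _ in range(10):
        optimizer.step()
        scheduler.step()  # lr ramps 0 -> 10.0 over the warmup, then decays linearly to 0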
| 221
| 0
|
import requests
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> None:
_lowercase : Union[str, Any] = {'Content-Type': 'application/json'}
_lowercase : Dict = requests.post(SCREAMING_SNAKE_CASE , json={'text': message_body} , headers=SCREAMING_SNAKE_CASE )
if response.status_code != 200:
_lowercase : Union[str, Any] = (
'Request to slack returned an error '
F"""{response.status_code}, the response is:\n{response.text}"""
)
raise ValueError(SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
# Set the slack url to the one provided by Slack when you create the webhook at
# https://my.slack.com/services/new/incoming-webhook/
send_slack_message("<YOUR MESSAGE BODY>", "<SLACK CHANNEL URL>")
| 713
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_swinv2": ["SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Swinv2Config"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_swinv2"] = [
"SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST",
"Swinv2ForImageClassification",
"Swinv2ForMaskedImageModeling",
"Swinv2Model",
"Swinv2PreTrainedModel",
]
if TYPE_CHECKING:
    from .configuration_swinv2 import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, Swinv2Config
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_swinv2 import (
            SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Swinv2ForImageClassification,
            Swinv2ForMaskedImageModeling,
            Swinv2Model,
            Swinv2PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
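# From the user's side nothing changes: `from transformers import Swinv2Model` still
# works, but the heavy modeling file is only imported on first attribute access.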
| 677
| 0
|
ks = range(2, 20 + 1)
base = [10**k for k in range(ks[-1] + 1)]
memo: dict[int, dict[int, list[list[int]]]] = {}
def next_term(a_i, k, i, n):
    """simple docstring"""
    ds_b = sum(a_i[j] for j in range(k, len(a_i)))
    c = sum(a_i[j] * base[j] for j in range(min(len(a_i), k)))

    diff, dn = 0, 0
    max_dn = n - i

    sub_memo = memo.get(ds_b)

    if sub_memo is not None:
        jumps = sub_memo.get(c)

        if jumps is not None and len(jumps) > 0:
            # find and make the largest jump without going over
            max_jump = -1
            for _k in range(len(jumps) - 1, -1, -1):
                if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
                    max_jump = _k
                    break

            if max_jump >= 0:
                diff, dn, _kk = jumps[max_jump]
                # since the difference between jumps is cached, add c
                new_c = diff + c
                for j in range(min(k, len(a_i))):
                    new_c, a_i[j] = divmod(new_c, 10)
                if new_c > 0:
                    add(a_i, k, new_c)
        else:
            sub_memo[c] = []
    else:
        sub_memo = {c: []}
        memo[ds_b] = sub_memo

    if dn >= max_dn or c + diff >= base[k]:
        return diff, dn

    if k > ks[0]:
        while True:
            # keep doing smaller jumps
            _diff, terms_jumped = next_term(a_i, k - 1, i + dn, n)
            diff += _diff
            dn += terms_jumped

            if dn >= max_dn or c + diff >= base[k]:
                break
    else:
        # would be too small a jump, just compute sequential terms instead
        _diff, terms_jumped = compute(a_i, k, i + dn, n)
        diff += _diff
        dn += terms_jumped

    jumps = sub_memo[c]

    # keep jumps sorted by # of terms skipped
    j = 0
    while j < len(jumps):
        if jumps[j][1] > dn:
            break
        j += 1

    # cache the jump for this value digitsum(b) and c
    sub_memo[c].insert(j, (diff, dn, k))

    return (diff, dn)
def compute(a_i, k, i, n):
    """simple docstring"""
    if i >= n:
        return 0, i
    if k > len(a_i):
        a_i.extend([0 for _ in range(k - len(a_i))])

    # note: a_i -> b * 10^k + c
    # ds_b -> digitsum(b)
    # ds_c -> digitsum(c)
    start_i = i
    ds_b, ds_c, diff = 0, 0, 0
    for j in range(len(a_i)):
        if j >= k:
            ds_b += a_i[j]
        else:
            ds_c += a_i[j]

    while i < n:
        i += 1
        addend = ds_c + ds_b
        diff += addend
        ds_c = 0
        for j in range(k):
            s = a_i[j] + addend
            addend, a_i[j] = divmod(s, 10)
            ds_c += a_i[j]

        if addend > 0:
            break

    if addend > 0:
        add(a_i, k, addend)
    return diff, i - start_i
def add(digits, k, addend):
    """simple docstring"""
    for j in range(k, len(digits)):
        s = digits[j] + addend
        if s >= 10:
            quotient, digits[j] = divmod(s, 10)
            addend = addend // 10 + quotient
        else:
            digits[j] = s
            addend = addend // 10

        if addend == 0:
            break

    while addend > 0:
        addend, digit = divmod(addend, 10)
        digits.append(digit)
def solution(n: int = 10**15) -> int:
    """simple docstring"""
    digits = [1]
    i = 1
    dn = 0
    while True:
        diff, terms_jumped = next_term(digits, 20, i + dn, n)
        dn += terms_jumped
        if dn == n - i:
            break

    a_n = 0
    for j in range(len(digits)):
        a_n += digits[j] * 10**j
    return a_n
if __name__ == "__main__":
print(f'''{solution() = }''')
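# The memoised jumps above only pay off for huge n; a brute-force sketch of the same
# recurrence, useful as a cross-check for small inputs:
def solution_brute_force(n: int = 1_000) -> int:
    """Direct recurrence a(n+1) = a(n) + digitsum(a(n)).

    First terms: 1, 2, 4, 8, 16, 23, 28, 38, 49, ...
    """
    a = 1
    for _ in range(n - 1):
        a += sum(int(digit) for digit in str(a))
    return a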
| 55
|
"""simple docstring"""
import json
import os
from typing import Dict, List, Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
'''vocab_file''': '''vocab.json''',
'''merges_file''': '''merges.txt''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json'''
},
'''merges_file''': {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt'''
},
'''tokenizer_config_file''': {
'''facebook/blenderbot_small-90M''': (
'''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json'''
)
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {'''facebook/blenderbot_small-90M''': 512}
def get_pairs(word):
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char

    pairs = set(pairs)
    return pairs
class BlenderbotSmallTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['''input_ids''', '''attention_mask''']

    def __init__(self, vocab_file, merges_file, bos_token="__start__", eos_token="__end__", unk_token="__unk__", pad_token="__null__", **kwargs):
        super().__init__(unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, **kwargs)

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            merges = merges_handle.read().split("\n")[1:-1]
        merges = [tuple(merge.split()) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}

    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    def get_vocab(self) -> dict:
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token: str) -> str:
        if token in self.cache:
            return self.cache[token]
        token = re.sub("([.,!?()])", r" \1", token)
        token = re.sub("(')", r" \1 ", token)
        token = re.sub(r"\s{2,}", " ", token)
        if "\n" in token:
            token = token.replace("\n", " __newln__")

        tokens = token.split(" ")
        words = []
        for token in tokens:
            if not len(token):
                continue

            token = token.lower()
            word = tuple(token)
            word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
            pairs = get_pairs(word)

            if not pairs:
                words.append(token)
                continue

            while True:
                bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))

                if bigram not in self.bpe_ranks:
                    break

                first, second = bigram
                new_word = []
                i = 0

                while i < len(word):
                    try:
                        j = word.index(first, i)
                        new_word.extend(word[i:j])
                        i = j
                    except ValueError:
                        new_word.extend(word[i:])
                        break

                    if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                        new_word.append(first + second)
                        i += 2
                    else:
                        new_word.append(word[i])
                        i += 1
                word = tuple(new_word)
                if len(word) == 1:
                    break
                else:
                    pairs = get_pairs(word)
            word = "@@ ".join(word)
            word = word[:-4]

            self.cache[token] = word
            words.append(word)
        return " ".join(words)
    def _tokenize(self, text: str) -> list:
        split_tokens = []
        words = re.findall(r"\S+\n?", text)
        for token in words:
            split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens

    def _convert_token_to_id(self, token: str) -> int:
        token = token.lower()
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index: int) -> str:
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens: list) -> str:
        out_string = " ".join(tokens).replace("@@ ", "").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"])

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
                        " Please check that the tokenizer is not corrupted!")
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1
        return vocab_file, merge_file
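# Usage sketch against the checkpoint this file targets (downloads the 90M vocab):
#
#     tokenizer = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot_small-90M")
#     print(tokenizer.tokenize("sample sentence"))  # lowercased BPE pieces, continuations marked "@@"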
| 95
| 0
|
from __future__ import annotations
def bucket_sort(my_list: list) -> list:
    if len(my_list) == 0:
        return []
    min_value, max_value = min(my_list), max(my_list)
    bucket_count = int(max_value - min_value) + 1
    buckets: list[list] = [[] for _ in range(bucket_count)]

    for i in my_list:
        buckets[int(i - min_value)].append(i)

    return [v for bucket in buckets for v in sorted(bucket)]
if __name__ == "__main__":
from doctest import testmod
testmod()
assert bucket_sort([4, 5, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert bucket_sort([0, 1, -10, 15, 2, -2]) == [-10, -2, 0, 1, 2, 15]
| 701
|
import warnings
from ...utils import logging
from .image_processing_deformable_detr import DeformableDetrImageProcessor
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
class DeformableDetrFeatureExtractor(DeformableDetrImageProcessor):
    def __init__(self, *args, **kwargs):
        '''simple docstring'''
        warnings.warn(
            "The class DeformableDetrFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use DeformableDetrImageProcessor instead.", FutureWarning, )
        super().__init__(*args, **kwargs)
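# The shim still constructs a fully functional image processor; it just warns first.
# A quick sketch (guarded so importing the module stays silent):
if __name__ == "__main__":
    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        DeformableDetrFeatureExtractor()
    print(caught[0].category)  # <class 'FutureWarning'>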
| 467
| 0
|
"""simple docstring"""
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.albert.modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
)
class FlaxAlbertModelTester(unittest.TestCase):
"""simple docstring"""
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_attention_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_choices=4, ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = AlbertConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_flax
class FlaxAlbertModelTest(FlaxModelTesterMixin, unittest.TestCase):
    """simple docstring"""
    all_model_classes = (
        (
            FlaxAlbertModel,
            FlaxAlbertForPreTraining,
            FlaxAlbertForMaskedLM,
            FlaxAlbertForMultipleChoice,
            FlaxAlbertForQuestionAnswering,
            FlaxAlbertForSequenceClassification,
            FlaxAlbertForTokenClassification,
            FlaxAlbertForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )
    def setUp(self) -> None:
        self.model_tester = FlaxAlbertModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("albert-base-v2")
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxAlbertModelIntegrationTest(unittest.TestCase):
"""simple docstring"""
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = FlaxAlbertModel.from_pretrained("albert-base-v2")
        input_ids = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = (1, 11, 768)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = np.array(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]])

        self.assertTrue(jnp.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
| 505
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_A = {"configuration_vit_msn": ["VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMSNConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vit_msn"] = [
"VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST",
"ViTMSNModel",
"ViTMSNForImageClassification",
"ViTMSNPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_msn import (
VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMSNForImageClassification,
ViTMSNModel,
ViTMSNPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 505
| 1
|
from __future__ import annotations
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTForImageClassification, TFViTModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class TFViTModelTester:
def __init__( self , lowercase_ , lowercase_=13 , lowercase_=30 , lowercase_=2 , lowercase_=3 , lowercase_=True , lowercase_=True , lowercase_=32 , lowercase_=2 , lowercase_=4 , lowercase_=37 , lowercase_="gelu" , lowercase_=0.1 , lowercase_=0.1 , lowercase_=10 , lowercase_=0.02 , lowercase_=3 , lowercase_=None , ):
_snake_case : Tuple = parent
_snake_case : Union[str, Any] = batch_size
_snake_case : Optional[int] = image_size
_snake_case : Union[str, Any] = patch_size
_snake_case : List[str] = num_channels
_snake_case : Optional[Any] = is_training
_snake_case : str = use_labels
_snake_case : Dict = hidden_size
_snake_case : Union[str, Any] = num_hidden_layers
_snake_case : Tuple = num_attention_heads
_snake_case : Dict = intermediate_size
_snake_case : Dict = hidden_act
_snake_case : Union[str, Any] = hidden_dropout_prob
_snake_case : Union[str, Any] = attention_probs_dropout_prob
_snake_case : Tuple = type_sequence_label_size
_snake_case : int = initializer_range
_snake_case : Optional[int] = scope
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
_snake_case : Optional[int] = (image_size // patch_size) ** 2
_snake_case : int = num_patches + 1
def UpperCamelCase ( self ):
_snake_case : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_snake_case : List[Any] = None
if self.use_labels:
_snake_case : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_snake_case : List[Any] = self.get_config()
return config, pixel_values, labels
def UpperCamelCase ( self ):
return ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowercase_ , initializer_range=self.initializer_range , )
def UpperCamelCase ( self , lowercase_ , lowercase_ , lowercase_ ):
_snake_case : List[Any] = TFViTModel(config=lowercase_ )
_snake_case : List[Any] = model(lowercase_ , training=lowercase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# Test with an image with different size than the one specified in config.
_snake_case : List[Any] = self.image_size // 2
_snake_case : List[str] = pixel_values[:, :, :image_size, :image_size]
_snake_case : List[str] = model(lowercase_ , interpolate_pos_encoding=lowercase_ , training=lowercase_ )
_snake_case : Tuple = (image_size // self.patch_size) ** 2 + 1
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, seq_length, self.hidden_size) )
def UpperCamelCase ( self , lowercase_ , lowercase_ , lowercase_ ):
_snake_case : Tuple = self.type_sequence_label_size
_snake_case : Tuple = TFViTForImageClassification(lowercase_ )
_snake_case : List[str] = model(lowercase_ , labels=lowercase_ , training=lowercase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# Test with an image with different size than the one specified in config.
_snake_case : int = self.image_size // 2
_snake_case : Any = pixel_values[:, :, :image_size, :image_size]
_snake_case : List[Any] = model(lowercase_ , interpolate_pos_encoding=lowercase_ , training=lowercase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
_snake_case : List[str] = 1
_snake_case : Union[str, Any] = TFViTForImageClassification(lowercase_ )
_snake_case : str = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_snake_case : Any = model(lowercase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def UpperCamelCase ( self ):
_snake_case : Union[str, Any] = self.prepare_config_and_inputs()
_snake_case ,_snake_case ,_snake_case : str = config_and_inputs
_snake_case : Any = {"pixel_values": pixel_values}
return config, inputs_dict
@require_tf
class TFViTModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFViTModel, TFViTForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {'feature-extraction': TFViTModel, 'image-classification': TFViTForImageClassification}
        if is_tf_available()
        else {}
    )

    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFViTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTConfig, has_text_modality=False, hidden_size=37)
def UpperCamelCase ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason="ViT does not use inputs_embeds" )
def UpperCamelCase ( self ):
pass
@unittest.skip(reason="ViT does not use inputs_embeds" )
def UpperCamelCase ( self ):
pass
def UpperCamelCase ( self ):
_snake_case ,_snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case : Tuple = model_class(lowercase_ )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
_snake_case : List[Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowercase_ , tf.keras.layers.Layer ) )
def UpperCamelCase ( self ):
_snake_case ,_snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case : List[Any] = model_class(lowercase_ )
_snake_case : Optional[int] = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_snake_case : str = [*signature.parameters.keys()]
_snake_case : List[str] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , lowercase_ )
def UpperCamelCase ( self ):
_snake_case : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase_ )
def UpperCamelCase ( self ):
_snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowercase_ )
@slow
def UpperCamelCase ( self ):
_snake_case : Any = TFViTModel.from_pretrained("google/vit-base-patch16-224" )
self.assertIsNotNone(lowercase_ )
def snake_case () -> str:
'''simple docstring'''
_snake_case : int = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_tf
@require_vision
class TFViTModelIntegrationTest(unittest.TestCase):
@cached_property
def UpperCamelCase ( self ):
return ViTImageProcessor.from_pretrained("google/vit-base-patch16-224" ) if is_vision_available() else None
@slow
def UpperCamelCase ( self ):
_snake_case : Union[str, Any] = TFViTForImageClassification.from_pretrained("google/vit-base-patch16-224" )
_snake_case : List[Any] = self.default_image_processor
_snake_case : Union[str, Any] = prepare_img()
_snake_case : List[Any] = image_processor(images=lowercase_ , return_tensors="tf" )
# forward pass
_snake_case : List[Any] = model(**lowercase_ )
# verify the logits
_snake_case : Union[str, Any] = tf.TensorShape((1, 1_000) )
self.assertEqual(outputs.logits.shape , lowercase_ )
_snake_case : List[Any] = tf.constant([-0.2_744, 0.8_215, -0.0_836] )
tf.debugging.assert_near(outputs.logits[0, :3] , lowercase_ , atol=1e-4 )
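# `interpolate_pos_encoding=True`, exercised in the tester above, is what lets the
# 224-trained checkpoint accept other resolutions; a minimal sketch (downloads the
# model, so kept behind a guard):
if __name__ == "__main__":
    model = TFViTModel.from_pretrained("google/vit-base-patch16-224")
    pixel_values = tf.random.uniform((1, 3, 112, 112))  # half the training resolution
    outputs = model(pixel_values, interpolate_pos_encoding=True)
    print(outputs.last_hidden_state.shape)  # (1, (112 // 16) ** 2 + 1, 768) == (1, 50, 768)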
| 580
|
from __future__ import annotations
def kmp(pattern: str, text: str) -> bool:
    '''simple docstring'''
    # 1) Construct the failure array
    failure = get_failure_array(pattern)

    # 2) Step through text searching for pattern
    i, j = 0, 0  # index into text, pattern
    while i < len(text):
        if pattern[j] == text[i]:
            if j == (len(pattern) - 1):
                return True
            j += 1

        # if this is a prefix in our pattern
        # just go back far enough to continue
        elif j > 0:
            j = failure[j - 1]
            continue
        i += 1
    return False


def get_failure_array(pattern: str) -> list[int]:
    '''simple docstring'''
    failure = [0]
    i = 0
    j = 1
    while j < len(pattern):
        if pattern[i] == pattern[j]:
            i += 1
        elif i > 0:
            i = failure[i - 1]
            continue
        j += 1
        failure.append(i)
    return failure
if __name__ == "__main__":
# Test 1)
__SCREAMING_SNAKE_CASE : Union[str, Any] = 'abc1abc12'
__SCREAMING_SNAKE_CASE : Dict = 'alskfjaldsabc1abc1abc12k23adsfabcabc'
__SCREAMING_SNAKE_CASE : Optional[int] = 'alskfjaldsk23adsfabcabc'
assert kmp(pattern, texta) and not kmp(pattern, texta)
# Test 2)
__SCREAMING_SNAKE_CASE : Union[str, Any] = 'ABABX'
__SCREAMING_SNAKE_CASE : Dict = 'ABABZABABYABABX'
assert kmp(pattern, text)
# Test 3)
__SCREAMING_SNAKE_CASE : List[Any] = 'AAAB'
__SCREAMING_SNAKE_CASE : List[str] = 'ABAAAAAB'
assert kmp(pattern, text)
# Test 4)
__SCREAMING_SNAKE_CASE : Union[str, Any] = 'abcdabcy'
__SCREAMING_SNAKE_CASE : Dict = 'abcxabcdabxabcdabcdabcy'
assert kmp(pattern, text)
# Test 5)
__SCREAMING_SNAKE_CASE : Dict = 'aabaabaaa'
assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
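    # The failure array is the core idea: failure[i] is the length of the longest proper
    # prefix of the pattern that is also a suffix of pattern[: i + 1].
    assert get_failure_array('ABABX') == [0, 0, 1, 2, 0]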
| 580
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_git''': ['''GIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GitConfig''', '''GitVisionConfig'''],
'''processing_git''': ['''GitProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_git'''] = [
'''GIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GitForCausalLM''',
'''GitModel''',
'''GitPreTrainedModel''',
'''GitVisionModel''',
]
if TYPE_CHECKING:
from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
from .processing_git import GitProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_git import (
GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GitForCausalLM,
GitModel,
GitPreTrainedModel,
GitVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 467
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
__snake_case = logging.get_logger(__name__)
__snake_case = {
"""microsoft/table-transformer-detection""": (
"""https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json"""
),
}
class TableTransformerConfig(PretrainedConfig):
    """simple docstring"""
    model_type = '''table-transformer'''
    keys_to_ignore_at_inference = ['''past_key_values''']
    attribute_map = {
        '''hidden_size''': '''d_model''',
        '''num_attention_heads''': '''encoder_attention_heads''',
    }
def __init__( self : str , lowercase_ : str=True , lowercase_ : List[str]=None , lowercase_ : Dict=3 , lowercase_ : Optional[Any]=100 , lowercase_ : Optional[int]=6 , lowercase_ : Optional[Any]=2_048 , lowercase_ : List[Any]=8 , lowercase_ : Optional[Any]=6 , lowercase_ : int=2_048 , lowercase_ : Any=8 , lowercase_ : Optional[Any]=0.0 , lowercase_ : int=0.0 , lowercase_ : Dict=True , lowercase_ : int="relu" , lowercase_ : Tuple=256 , lowercase_ : Dict=0.1 , lowercase_ : List[str]=0.0 , lowercase_ : List[str]=0.0 , lowercase_ : int=0.0_2 , lowercase_ : List[str]=1.0 , lowercase_ : Optional[int]=False , lowercase_ : List[Any]="sine" , lowercase_ : Optional[int]="resnet50" , lowercase_ : Union[str, Any]=True , lowercase_ : Union[str, Any]=False , lowercase_ : List[str]=1 , lowercase_ : Any=5 , lowercase_ : Optional[int]=2 , lowercase_ : Dict=1 , lowercase_ : Optional[int]=1 , lowercase_ : Dict=5 , lowercase_ : Optional[int]=2 , lowercase_ : Optional[Any]=0.1 , **lowercase_ : List[str] , ):
'''simple docstring'''
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("""You can't specify both `backbone_config` and `use_timm_backbone`.""")
        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""")
                backbone_config = CONFIG_MAPPING["""resnet"""](out_features=["""stage4"""])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("""model_type""")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
            # set timm attributes to None
            dilation, backbone, use_pretrained_backbone = None, None, None
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
@property
    def num_attention_heads(self):
'''simple docstring'''
return self.encoder_attention_heads
@property
    def hidden_size(self):
'''simple docstring'''
return self.d_model
class TableTransformerOnnxConfig(OnnxConfig):
"""simple docstring"""
    torch_onnx_minimum_version = version.parse("1.11")
@property
    def inputs(self):
'''simple docstring'''
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
("""pixel_mask""", {0: """batch"""}),
] )
@property
    def atol_for_validation(self):
'''simple docstring'''
return 1e-5
@property
    def default_onnx_opset(self):
'''simple docstring'''
return 12
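# Usage sketch (illustrative): the attribute_map above aliases common names to
# the DETR-style ones, so with the defaults:
#   config = TableTransformerConfig()
#   config.hidden_size          # -> 256 (alias of d_model)
#   config.num_attention_heads  # -> 8   (alias of encoder_attention_heads)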
| 451
| 0
|
'''simple docstring'''
from math import pi, sqrt, tan
def surface_area_cube(side_length):
    if side_length < 0:
        raise ValueError("surface_area_cube() only accepts non-negative values")
    return 6 * side_length**2
def surface_area_cuboid(length, breadth, height):
    if length < 0 or breadth < 0 or height < 0:
        raise ValueError("surface_area_cuboid() only accepts non-negative values")
    return 2 * ((length * breadth) + (breadth * height) + (length * height))
def surface_area_sphere(radius):
    if radius < 0:
        raise ValueError("surface_area_sphere() only accepts non-negative values")
    return 4 * pi * radius**2
def surface_area_hemisphere(radius):
    if radius < 0:
        raise ValueError("surface_area_hemisphere() only accepts non-negative values")
    return 3 * pi * radius**2
def surface_area_cone(radius, height):
    if radius < 0 or height < 0:
        raise ValueError("surface_area_cone() only accepts non-negative values")
    return pi * radius * (radius + (height**2 + radius**2) ** 0.5)
def surface_area_conical_frustum(radius_1, radius_2, height):
    if radius_1 < 0 or radius_2 < 0 or height < 0:
        raise ValueError(
            "surface_area_conical_frustum() only accepts non-negative values")
    slant_height = (height**2 + (radius_1 - radius_2) ** 2) ** 0.5
    return pi * ((slant_height * (radius_1 + radius_2)) + radius_1**2 + radius_2**2)
def surface_area_cylinder(radius, height):
    if radius < 0 or height < 0:
        raise ValueError("surface_area_cylinder() only accepts non-negative values")
    return 2 * pi * radius * (height + radius)
def surface_area_torus(torus_radius, tube_radius):
    if torus_radius < 0 or tube_radius < 0:
        raise ValueError("surface_area_torus() only accepts non-negative values")
    if torus_radius < tube_radius:
        raise ValueError(
            "surface_area_torus() does not support spindle or self intersecting tori")
    return 4 * pow(pi, 2) * torus_radius * tube_radius
def area_rectangle(length, width):
    if length < 0 or width < 0:
        raise ValueError("area_rectangle() only accepts non-negative values")
    return length * width
def area_square(side_length):
    if side_length < 0:
        raise ValueError("area_square() only accepts non-negative values")
    return side_length**2
def area_triangle(base, height):
    if base < 0 or height < 0:
        raise ValueError("area_triangle() only accepts non-negative values")
    return (base * height) / 2
def area_triangle_three_sides(side1, side2, side3):
    if side1 < 0 or side2 < 0 or side3 < 0:
        raise ValueError("area_triangle_three_sides() only accepts non-negative values")
    elif side1 + side2 < side3 or side1 + side3 < side2 or side2 + side3 < side1:
        raise ValueError("Given three sides do not form a triangle")
    semi_perimeter = (side1 + side2 + side3) / 2
    area = sqrt(
        semi_perimeter
        * (semi_perimeter - side1)
        * (semi_perimeter - side2)
        * (semi_perimeter - side3))
    return area
def area_parallelogram(base, height):
    if base < 0 or height < 0:
        raise ValueError("area_parallelogram() only accepts non-negative values")
    return base * height
def area_trapezium(base1, base2, height):
    if base1 < 0 or base2 < 0 or height < 0:
        raise ValueError("area_trapezium() only accepts non-negative values")
    return 1 / 2 * (base1 + base2) * height
def area_circle(radius):
    if radius < 0:
        raise ValueError("area_circle() only accepts non-negative values")
    return pi * radius**2
def area_ellipse(radius_x, radius_y):
    if radius_x < 0 or radius_y < 0:
        raise ValueError("area_ellipse() only accepts non-negative values")
    return pi * radius_x * radius_y
def area_rhombus(diagonal_1, diagonal_2):
    if diagonal_1 < 0 or diagonal_2 < 0:
        raise ValueError("area_rhombus() only accepts non-negative values")
    return 1 / 2 * diagonal_1 * diagonal_2
def area_reg_polygon(sides, length):
    if not isinstance(sides, int) or sides < 3:
        raise ValueError(
            "area_reg_polygon() only accepts integers greater than or \
equal to three as number of sides")
    elif length < 0:
        raise ValueError(
            "area_reg_polygon() only accepts non-negative values as \
length of a side")
    return (sides * length**2) / (4 * tan(pi / sides))
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True) # verbose so we can see methods missing tests
print('''[DEMO] Areas of various geometric shapes: \n''')
print(f'Rectangle: {area_rectangle(10, 20) = }')
print(f'Square: {area_square(10) = }')
print(f'Triangle: {area_triangle(10, 10) = }')
print(f'Triangle: {area_triangle_three_sides(5, 12, 13) = }')
print(f'Parallelogram: {area_parallelogram(10, 20) = }')
print(f'Rhombus: {area_rhombus(10, 20) = }')
print(f'Trapezium: {area_trapezium(10, 20, 30) = }')
print(f'Circle: {area_circle(20) = }')
print(f'Ellipse: {area_ellipse(10, 20) = }')
print('''\nSurface Areas of various geometric shapes: \n''')
print(f'Cube: {surface_area_cube(20) = }')
print(f'Cuboid: {surface_area_cuboid(10, 20, 30) = }')
print(f'Sphere: {surface_area_sphere(20) = }')
print(f'Hemisphere: {surface_area_hemisphere(20) = }')
print(f'Cone: {surface_area_cone(10, 20) = }')
print(f'Conical Frustum: {surface_area_conical_frustum(10, 20, 30) = }')
print(f'Cylinder: {surface_area_cylinder(10, 20) = }')
print(f'Torus: {surface_area_torus(20, 10) = }')
print(f'Equilateral Triangle: {area_reg_polygon(3, 10) = }')
print(f'Square: {area_reg_polygon(4, 10) = }')
    print(f'Regular Pentagon: {area_reg_polygon(5, 10) = }')
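    # Quick sanity check of Heron's formula: a 5-12-13 right triangle has area
    # (5 * 12) / 2 = 30, so the two triangle helpers must agree.
    assert area_triangle_three_sides(5, 12, 13) == area_triangle(5, 12) == 30.0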
| 399
|
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
ImageTextPipelineOutput,
UniDiffuserPipeline,
)
else:
from .modeling_text_decoder import UniDiffuserTextDecoder
    from .modeling_uvit import UniDiffuserModel, UTransformer2DModel
from .pipeline_unidiffuser import ImageTextPipelineOutput, UniDiffuserPipeline
| 399
| 1
|
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ConvNextConfig, SegformerImageProcessor, UperNetConfig, UperNetForSemanticSegmentation
def get_upernet_config(model_name):
    auxiliary_in_channels = 384
    if "tiny" in model_name:
        depths = [3, 3, 9, 3]
        hidden_sizes = [96, 192, 384, 768]
    if "small" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [96, 192, 384, 768]
    if "base" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [128, 256, 512, 1024]
        auxiliary_in_channels = 512
    if "large" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [192, 384, 768, 1536]
        auxiliary_in_channels = 768
    if "xlarge" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [256, 512, 1024, 2048]
        auxiliary_in_channels = 1024
    # set label information
    num_labels = 150
    repo_id = "huggingface/label-files"
    filename = "ade20k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    backbone_config = ConvNextConfig(
        depths=depths, hidden_sizes=hidden_sizes, out_features=["stage1", "stage2", "stage3", "stage4"])
    config = UperNetConfig(
        backbone_config=backbone_config, auxiliary_in_channels=auxiliary_in_channels, num_labels=num_labels, id2label=id2label, label2id=label2id, )
    return config
def create_rename_keys(config):
    rename_keys = []
# fmt: off
# stem
rename_keys.append(("backbone.downsample_layers.0.0.weight", "backbone.embeddings.patch_embeddings.weight") )
rename_keys.append(("backbone.downsample_layers.0.0.bias", "backbone.embeddings.patch_embeddings.bias") )
rename_keys.append(("backbone.downsample_layers.0.1.weight", "backbone.embeddings.layernorm.weight") )
rename_keys.append(("backbone.downsample_layers.0.1.bias", "backbone.embeddings.layernorm.bias") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F"backbone.stages.{i}.{j}.gamma", F"backbone.encoder.stages.{i}.layers.{j}.layer_scale_parameter") )
rename_keys.append((F"backbone.stages.{i}.{j}.depthwise_conv.weight", F"backbone.encoder.stages.{i}.layers.{j}.dwconv.weight") )
rename_keys.append((F"backbone.stages.{i}.{j}.depthwise_conv.bias", F"backbone.encoder.stages.{i}.layers.{j}.dwconv.bias") )
rename_keys.append((F"backbone.stages.{i}.{j}.norm.weight", F"backbone.encoder.stages.{i}.layers.{j}.layernorm.weight") )
rename_keys.append((F"backbone.stages.{i}.{j}.norm.bias", F"backbone.encoder.stages.{i}.layers.{j}.layernorm.bias") )
rename_keys.append((F"backbone.stages.{i}.{j}.pointwise_conv1.weight", F"backbone.encoder.stages.{i}.layers.{j}.pwconv1.weight") )
rename_keys.append((F"backbone.stages.{i}.{j}.pointwise_conv1.bias", F"backbone.encoder.stages.{i}.layers.{j}.pwconv1.bias") )
rename_keys.append((F"backbone.stages.{i}.{j}.pointwise_conv2.weight", F"backbone.encoder.stages.{i}.layers.{j}.pwconv2.weight") )
rename_keys.append((F"backbone.stages.{i}.{j}.pointwise_conv2.bias", F"backbone.encoder.stages.{i}.layers.{j}.pwconv2.bias") )
if i > 0:
rename_keys.append((F"backbone.downsample_layers.{i}.0.weight", F"backbone.encoder.stages.{i}.downsampling_layer.0.weight") )
rename_keys.append((F"backbone.downsample_layers.{i}.0.bias", F"backbone.encoder.stages.{i}.downsampling_layer.0.bias") )
rename_keys.append((F"backbone.downsample_layers.{i}.1.weight", F"backbone.encoder.stages.{i}.downsampling_layer.1.weight") )
rename_keys.append((F"backbone.downsample_layers.{i}.1.bias", F"backbone.encoder.stages.{i}.downsampling_layer.1.bias") )
rename_keys.append((F"backbone.norm{i}.weight", F"backbone.hidden_states_norms.stage{i+1}.weight") )
rename_keys.append((F"backbone.norm{i}.bias", F"backbone.hidden_states_norms.stage{i+1}.bias") )
# decode head
rename_keys.extend(
[
("decode_head.conv_seg.weight", "decode_head.classifier.weight"),
("decode_head.conv_seg.bias", "decode_head.classifier.bias"),
("auxiliary_head.conv_seg.weight", "auxiliary_head.classifier.weight"),
("auxiliary_head.conv_seg.bias", "auxiliary_head.classifier.bias"),
] )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def convert_upernet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    model_name_to_url = {
        "upernet-convnext-tiny": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_tiny_fp16_512x512_160k_ade20k/upernet_convnext_tiny_fp16_512x512_160k_ade20k_20220227_124553-cad485de.pth",
        "upernet-convnext-small": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_small_fp16_512x512_160k_ade20k/upernet_convnext_small_fp16_512x512_160k_ade20k_20220227_131208-1b1e394f.pth",
        "upernet-convnext-base": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_base_fp16_512x512_160k_ade20k/upernet_convnext_base_fp16_512x512_160k_ade20k_20220227_181227-02a24fc6.pth",
        "upernet-convnext-large": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_large_fp16_640x640_160k_ade20k/upernet_convnext_large_fp16_640x640_160k_ade20k_20220226_040532-e57aa54d.pth",
        "upernet-convnext-xlarge": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_xlarge_fp16_640x640_160k_ade20k/upernet_convnext_xlarge_fp16_640x640_160k_ade20k_20220226_080344-95fc38c2.pth",
    }
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["state_dict"]
    config = get_upernet_config(model_name)
    model = UperNetForSemanticSegmentation(config)
    model.eval()
    # replace "bn" => "batch_norm"
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        if "bn" in key:
            key = key.replace("bn", "batch_norm")
        state_dict[key] = val
    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    model.load_state_dict(state_dict)
    # verify on image
    url = "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    processor = SegformerImageProcessor()
    pixel_values = processor(image, return_tensors="pt").pixel_values
    with torch.no_grad():
        outputs = model(pixel_values)
    if model_name == "upernet-convnext-tiny":
        expected_slice = torch.tensor(
            [[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]])
    elif model_name == "upernet-convnext-small":
        expected_slice = torch.tensor(
            [[-8.8236, -8.8236, -8.6771], [-8.8236, -8.8236, -8.6771], [-8.7638, -8.7638, -8.6240]])
    elif model_name == "upernet-convnext-base":
        expected_slice = torch.tensor(
            [[-8.8558, -8.8558, -8.6905], [-8.8558, -8.8558, -8.6905], [-8.7669, -8.7669, -8.6021]])
    elif model_name == "upernet-convnext-large":
        expected_slice = torch.tensor(
            [[-8.6660, -8.6660, -8.6210], [-8.6660, -8.6660, -8.6210], [-8.6310, -8.6310, -8.5964]])
    elif model_name == "upernet-convnext-xlarge":
        expected_slice = torch.tensor(
            [[-8.4980, -8.4980, -8.3977], [-8.4980, -8.4980, -8.3977], [-8.4379, -8.4379, -8.3412]])
    print("Logits:", outputs.logits[0, 0, :3, :3])
    assert torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4)
    print("Looks ok!")
    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        print(f"Pushing model and processor for {model_name} to hub")
        model.push_to_hub(f"openmmlab/{model_name}")
        processor.push_to_hub(f"openmmlab/{model_name}")
if __name__ == "__main__":
snake_case__ : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='upernet-convnext-tiny',
type=str,
choices=[F"""upernet-convnext-{size}""" for size in ['tiny', 'small', 'base', 'large', 'xlarge']],
help='Name of the ConvNext UperNet model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
snake_case__ : Optional[int] = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
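# Example invocation (script name and output path are placeholders):
#   python convert_convnext_upernet_to_pytorch.py \
#       --model_name upernet-convnext-tiny \
#       --pytorch_dump_folder_path ./upernet-convnext-tiny \
#       --push_to_hub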
| 408
|
from typing import Optional
from urllib.parse import quote
import huggingface_hub as hfh
from packaging import version
def hf_hub_url(repo_id: str, path: str, revision: Optional[str] = None) -> str:
    if version.parse(hfh.__version__).release < version.parse("0.11.0").release:
        # old versions of hfh don't url-encode the file path
        path = quote(path)
    return hfh.hf_hub_url(repo_id, path, repo_type="dataset", revision=revision)
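# Usage sketch (repo and path are placeholders, assuming a dataset hosted on
# the Hugging Face Hub):
#   url = hf_hub_url("user/my_dataset", "data/train file.csv")
# On hfh < 0.11.0 the space in the path is quoted here; newer hfh versions
# URL-encode the path themselves.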
| 408
| 1
|
import unittest
from transformers import JukeboxTokenizer
from transformers.testing_utils import require_torch
class _A ( unittest.TestCase ):
'''simple docstring'''
    tokenizer_class = JukeboxTokenizer
    metas = {
"""artist""": """Zac Brown Band""",
"""genres""": """Country""",
"""lyrics""": """I met a traveller from an antique land,\n Who said \"Two vast and trunkless legs of stone\n Stand in the desert. . . . Near them, on the sand,\n Half sunk a shattered visage lies, whose frown,\n And wrinkled lip, and sneer of cold command,\n Tell that its sculptor well those passions read\n Which yet survive, stamped on these lifeless things,\n The hand that mocked them, and the heart that fed;\n And on the pedestal, these words appear:\n My name is Ozymandias, King of Kings;\n Look on my Works, ye Mighty, and despair!\n Nothing beside remains. Round the decay\n Of that colossal Wreck, boundless and bare\n The lone and level sands stretch far away\n """,
}
@require_torch
    def test_1b_lyrics_tokenizer(self):
'''simple docstring'''
import torch
__lowercase = JukeboxTokenizer.from_pretrained("openai/jukebox-1b-lyrics" )
__lowercase = tokenizer(**self.metas )["input_ids"]
# fmt: off
__lowercase = [
torch.tensor([[
0, 0, 0, 7_169, 507, 9, 76, 39, 31, 46, 76, 27,
76, 46, 44, 27, 48, 31, 38, 38, 31, 44, 76, 32,
44, 41, 39, 76, 27, 40, 76, 27, 40, 46, 35, 43,
47, 31, 76, 38, 27, 40, 30, 64, 78, 76, 76, 76,
76, 76, 76, 76, 76, 23, 34, 41, 76, 45, 27, 35,
30, 76, 71, 20, 49, 41, 76, 48, 27, 45, 46, 76,
27, 40, 30, 76, 46, 44, 47, 40, 37, 38, 31, 45,
45, 76, 38, 31, 33, 45, 76, 41, 32, 76, 45, 46,
41, 40, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
19, 46, 27, 40, 30, 76, 35, 40, 76, 46, 34, 31,
76, 30, 31, 45, 31, 44, 46, 63, 76, 63, 76, 63,
76, 63, 76, 14, 31, 27, 44, 76, 46, 34, 31, 39,
64, 76, 41, 40, 76, 46, 34, 31, 76, 45, 27, 40,
30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 8,
27, 38, 32, 76, 45, 47, 40, 37, 76, 27, 76, 45,
34, 27, 46, 46, 31, 44, 31, 30, 76, 48, 35, 45,
27, 33, 31, 76, 38, 35, 31, 45, 64, 76, 49, 34,
41, 45, 31, 76, 32, 44, 41, 49, 40, 64, 78, 76,
76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 49,
44, 35, 40, 37, 38, 31, 30, 76, 38, 35, 42, 64,
76, 27, 40, 30, 76, 45, 40, 31, 31, 44, 76, 41,
32, 76, 29, 41, 38, 30, 76, 29, 41, 39, 39, 27,
40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 31, 38, 38, 76, 46, 34, 27, 46, 76, 35, 46,
45, 76, 45, 29, 47, 38, 42, 46, 41, 44, 76, 49,
31, 38, 38, 76, 46, 34, 41, 45, 31, 76, 42, 27,
45, 45, 35, 41, 40, 45, 76, 44, 31, 27, 30, 78,
76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 35, 29,
34, 76, 51, 31, 46, 76, 45, 47, 44, 48, 35, 48,
31, 64, 76, 45, 46, 27, 39, 42, 31, 30, 76, 41,
40, 76, 46, 34, 31, 45, 31, 76, 38, 35, 32, 31,
38, 31, 45, 45, 76, 46, 34, 35, 40, 33, 45, 64,
78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31,
76, 34, 27, 40, 30, 76, 46, 34, 27, 46, 76, 39,
41, 29, 37, 31, 30, 76, 46, 34, 31, 39, 64, 76,
27, 40, 30, 76, 46, 34, 31, 76, 34, 31, 27, 44,
46, 76, 46, 34, 27, 46, 76, 32, 31, 30, 66, 78,
76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76,
41, 40, 76, 46, 34, 31, 76, 42, 31, 30, 31, 45,
46, 27, 38, 64, 76, 46, 34, 31, 45, 31, 76, 49,
41, 44, 30, 45, 76, 27, 42, 42, 31, 27, 44, 65,
78, 76, 76, 76, 76, 76, 76, 76, 76, 13, 51, 76,
40, 27, 39, 31, 76, 35, 45, 76, 15, 52, 51, 39,
27, 40, 30, 35, 27, 45, 64, 76, 11, 35, 40, 33,
76, 41, 32, 76, 11, 35, 40, 33, 45, 66, 78, 76,
76, 76, 76, 76, 76, 76, 76, 12, 41, 41, 37, 76,
41, 40, 76, 39, 51, 76, 23, 41, 44, 37, 45, 64,
76, 51, 31, 76, 13, 35, 33, 34, 46, 51, 64, 76,
27, 40, 30, 76, 30, 31, 45, 42, 27, 35, 44, 67,
78, 76, 76, 76, 76, 76, 76, 76, 76, 14, 41, 46,
34, 35, 40, 33, 76, 28, 31, 45, 35, 30, 31, 76,
44, 31, 39, 27, 35, 40, 45, 63, 76, 18, 41, 47,
40, 30, 76, 46, 34, 31, 76, 30, 31, 29, 27, 51,
78, 76, 76, 76, 76, 76, 76, 76, 76, 15, 32, 76,
46, 34, 27, 46, 76, 29, 41, 38, 41, 45, 45, 27,
38, 76, 23, 44, 31, 29, 37, 64, 76, 28, 41, 47,
40, 30, 38, 31, 45, 45, 76, 27, 40, 30, 76, 28,
27, 44, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 34, 31, 76, 38, 41, 40, 31, 76, 27, 40, 30,
76, 38, 31, 48, 31, 38, 76, 45, 27, 40, 30, 45,
76, 45, 46, 44, 31, 46, 29, 34, 76, 32, 27, 44,
76, 27, 49, 27, 51, 78, 76, 76, 76, 76, 76, 76,
76, 76]] ),
torch.tensor([[0, 0, 0, 1_069, 11]] ),
torch.tensor([[0, 0, 0, 1_069, 11]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
@require_torch
    def test_5b_lyrics_tokenizer(self):
'''simple docstring'''
import torch
__lowercase = JukeboxTokenizer.from_pretrained("openai/jukebox-5b-lyrics" )
__lowercase = tokenizer(**self.metas )["input_ids"]
# fmt: off
__lowercase = [
torch.tensor([[
0, 0, 0, 1_069, 11, -1, -1, -1, -1, 9, 77, 39,
31, 46, 77, 27, 77, 46, 44, 27, 48, 31, 38, 38,
31, 44, 77, 32, 44, 41, 39, 77, 27, 40, 77, 27,
40, 46, 35, 43, 47, 31, 77, 38, 27, 40, 30, 64,
79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 41,
77, 45, 27, 35, 30, 77, 72, 20, 49, 41, 77, 48,
27, 45, 46, 77, 27, 40, 30, 77, 46, 44, 47, 40,
37, 38, 31, 45, 45, 77, 38, 31, 33, 45, 77, 41,
32, 77, 45, 46, 41, 40, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 19, 46, 27, 40, 30, 77, 35, 40,
77, 46, 34, 31, 77, 30, 31, 45, 31, 44, 46, 63,
77, 63, 77, 63, 77, 63, 77, 14, 31, 27, 44, 77,
46, 34, 31, 39, 64, 77, 41, 40, 77, 46, 34, 31,
77, 45, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77,
77, 77, 77, 8, 27, 38, 32, 77, 45, 47, 40, 37,
77, 27, 77, 45, 34, 27, 46, 46, 31, 44, 31, 30,
77, 48, 35, 45, 27, 33, 31, 77, 38, 35, 31, 45,
64, 77, 49, 34, 41, 45, 31, 77, 32, 44, 41, 49,
40, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1,
40, 30, 77, 49, 44, 35, 40, 37, 38, 31, 30, 77,
38, 35, 42, 64, 77, 27, 40, 30, 77, 45, 40, 31,
31, 44, 77, 41, 32, 77, 29, 41, 38, 30, 77, 29,
41, 39, 39, 27, 40, 30, 64, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 31, 38, 38, 77, 46, 34, 27,
46, 77, 35, 46, 45, 77, 45, 29, 47, 38, 42, 46,
41, 44, 77, 49, 31, 38, 38, 77, 46, 34, 41, 45,
31, 77, 42, 27, 45, 45, 35, 41, 40, 45, 77, 44,
31, 27, 30, 79, 77, 77, 77, 77, 77, 77, 77, 77,
23, 34, 35, 29, 34, 77, 51, 31, 46, 77, 45, 47,
44, 48, 35, 48, 31, 64, 77, 45, 46, 27, 39, 42,
31, 30, 77, 41, 40, 77, 46, 34, 31, 45, 31, 77,
38, 35, 32, 31, 38, 31, 45, 45, 77, 46, 34, 35,
40, 33, 45, 64, 79, 77, 77, 77, 77, 77, 77, 77,
77, 20, 34, 31, 77, 34, 27, 40, 30, 77, 46, 34,
27, 46, 77, 39, 41, 29, 37, 31, 30, 77, 46, 34,
31, 39, 64, 77, 27, 40, 30, 77, 46, 34, 31, 77,
34, 31, 27, 44, 46, 77, 46, 34, 27, 46, 77, 32,
31, 30, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77,
1, 40, 30, 77, 41, 40, 77, 46, 34, 31, 77, 42,
31, 30, 31, 45, 46, 27, 38, 64, 77, 46, 34, 31,
45, 31, 77, 49, 41, 44, 30, 45, 77, 27, 42, 42,
31, 27, 44, 65, 79, 77, 77, 77, 77, 77, 77, 77,
77, 13, 51, 77, 40, 27, 39, 31, 77, 35, 45, 77,
15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 77,
11, 35, 40, 33, 77, 41, 32, 77, 11, 35, 40, 33,
45, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 12,
41, 41, 37, 77, 41, 40, 77, 39, 51, 77, 23, 41,
44, 37, 45, 64, 77, 51, 31, 77, 13, 35, 33, 34,
46, 51, 64, 77, 27, 40, 30, 77, 30, 31, 45, 42,
27, 35, 44, 67, 79, 77, 77, 77, 77, 77, 77, 77,
77, 14, 41, 46, 34, 35, 40, 33, 77, 28, 31, 45,
35, 30, 31, 77, 44, 31, 39, 27, 35, 40, 45, 63,
77, 18, 41, 47, 40, 30, 77, 46, 34, 31, 77, 30,
31, 29, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77,
77, 15, 32, 77, 46, 34, 27, 46, 77, 29, 41, 38,
41, 45, 45, 27, 38, 77, 23, 44, 31, 29, 37, 64,
77, 28, 41, 47, 40, 30, 38, 31, 45, 45, 77, 27,
40, 30, 77, 28, 27, 44, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 34, 31, 77, 38, 41, 40, 31,
77, 27, 40, 30, 77, 38, 31, 48, 31, 38, 77, 45,
27, 40, 30, 45, 77, 45, 46, 44, 31, 46, 29, 34,
77, 32, 27, 44, 77, 27, 49, 27, 51, 79, 77, 77,
77, 77, 77, 77, 77, 77]] ),
torch.tensor([[0, 0, 0, 1_069, 11, -1, -1, -1, -1]] ),
torch.tensor([[0, 0, 0, 1_069, 11, -1, -1, -1, -1]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
| 705
|
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class UnCLIPTextProjModel(ModelMixin, ConfigMixin):
'''simple docstring'''
@register_to_config
    def __init__(
        self, *,
        clip_extra_context_tokens: int = 4, clip_embeddings_dim: int = 768, time_embed_dim: int, cross_attention_dim, ):
        '''simple docstring'''
        super().__init__()
        self.learned_classifier_free_guidance_embeddings = nn.Parameter(torch.zeros(clip_embeddings_dim))
        # parameters for additional clip time embeddings
        self.embedding_proj = nn.Linear(clip_embeddings_dim, time_embed_dim)
        self.clip_image_embeddings_project_to_time_embeddings = nn.Linear(clip_embeddings_dim, time_embed_dim)
        # parameters for encoder hidden states
        self.clip_extra_context_tokens = clip_extra_context_tokens
        self.clip_extra_context_tokens_proj = nn.Linear(
            clip_embeddings_dim, self.clip_extra_context_tokens * cross_attention_dim)
        self.encoder_hidden_states_proj = nn.Linear(clip_embeddings_dim, cross_attention_dim)
        self.text_encoder_hidden_states_norm = nn.LayerNorm(cross_attention_dim)
    def forward(self, *, image_embeddings, prompt_embeds, text_encoder_hidden_states, do_classifier_free_guidance):
        '''simple docstring'''
        if do_classifier_free_guidance:
            # Add the classifier free guidance embeddings to the image embeddings
            image_embeddings_batch_size = image_embeddings.shape[0]
            classifier_free_guidance_embeddings = self.learned_classifier_free_guidance_embeddings.unsqueeze(0)
            classifier_free_guidance_embeddings = classifier_free_guidance_embeddings.expand(
                image_embeddings_batch_size, -1)
            image_embeddings = torch.cat([classifier_free_guidance_embeddings, image_embeddings], dim=0)
        # The image embeddings batch size and the text embeddings batch size are equal
        assert image_embeddings.shape[0] == prompt_embeds.shape[0]
        batch_size = prompt_embeds.shape[0]
        # "Specifically, we modify the architecture described in Nichol et al. (2021) by projecting and
        # adding CLIP embeddings to the existing timestep embedding, ...
        time_projected_prompt_embeds = self.embedding_proj(prompt_embeds)
        time_projected_image_embeddings = self.clip_image_embeddings_project_to_time_embeddings(image_embeddings)
        additive_clip_time_embeddings = time_projected_image_embeddings + time_projected_prompt_embeds
        # ... and by projecting CLIP embeddings into four
        # extra tokens of context that are concatenated to the sequence of outputs from the GLIDE text encoder"
        clip_extra_context_tokens = self.clip_extra_context_tokens_proj(image_embeddings)
        clip_extra_context_tokens = clip_extra_context_tokens.reshape(batch_size, -1, self.clip_extra_context_tokens)
        clip_extra_context_tokens = clip_extra_context_tokens.permute(0, 2, 1)
        text_encoder_hidden_states = self.encoder_hidden_states_proj(text_encoder_hidden_states)
        text_encoder_hidden_states = self.text_encoder_hidden_states_norm(text_encoder_hidden_states)
        text_encoder_hidden_states = torch.cat([clip_extra_context_tokens, text_encoder_hidden_states], dim=1)
        return text_encoder_hidden_states, additive_clip_time_embeddings
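# Shape sketch for the forward pass above (illustrative): with batch size b,
# text sequence length s, and defaults clip_extra_context_tokens=4,
# cross_attention_dim=d:
#   clip_extra_context_tokens_proj(image_embeddings): (b, 4 * d)
#   -> reshape to (b, d, 4) -> permute to (b, 4, d)
#   concatenated with the projected text states (b, s, d) along dim=1,
#   yielding encoder hidden states of shape (b, 4 + s, d).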
| 655
| 0
|
def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ = "The quick brown fox jumps over the lazy dog" , ):
lowercase__ = set()
# Replace all the whitespace in our sentence
lowercase__ = input_str.replace(" " , "" )
for alpha in input_str:
if "a" <= alpha.lower() <= "z":
frequency.add(alpha.lower() )
return len(SCREAMING_SNAKE_CASE_ ) == 26
def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ = "The quick brown fox jumps over the lazy dog" , ):
lowercase__ = [False] * 26
for char in input_str:
if char.islower():
lowercase__ = True
elif char.isupper():
lowercase__ = True
return all(SCREAMING_SNAKE_CASE_ )
def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ = "The quick brown fox jumps over the lazy dog" , ):
return len({char for char in input_str.lower() if char.isalpha()} ) == 26
def __lowerCAmelCase ( ):
from timeit import timeit
lowercase__ = "from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest"
print(timeit("is_pangram()" , setup=SCREAMING_SNAKE_CASE_ ) )
print(timeit("is_pangram_faster()" , setup=SCREAMING_SNAKE_CASE_ ) )
print(timeit("is_pangram_fastest()" , setup=SCREAMING_SNAKE_CASE_ ) )
# 5.348480500048026, 2.6477354579837993, 1.8470395830227062
# 5.036091582966037, 2.644472333951853, 1.8869528750656173
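# The ranking is stable across both runs above: each successive variant moves
# more per-character work into C-level built-ins, with the fastest doing a
# single pass over input_str.lower() inside one set comprehension and no
# Python-level case branching.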
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 413
|
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModel,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImg2ImgPipeline, UNet2DConditionModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
load_image,
load_numpy,
require_torch_gpu,
skip_mps,
slow,
torch_device,
)
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class StableUnCLIPImg2ImgPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableUnCLIPImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset(
        [])  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])
    def get_dummy_components(self):
        embedder_hidden_size = 32
        embedder_projection_dim = embedder_hidden_size
        # image encoding components
        feature_extractor = CLIPImageProcessor(crop_size=32, size=32)
        torch.manual_seed(0)
        image_encoder = CLIPVisionModelWithProjection(
            CLIPVisionConfig(
                hidden_size=embedder_hidden_size, projection_dim=embedder_projection_dim, num_hidden_layers=5, num_attention_heads=4, image_size=32, intermediate_size=37, patch_size=1, ))
        # regular denoising components
        torch.manual_seed(0)
        image_normalizer = StableUnCLIPImageNormalizer(embedding_dim=embedder_hidden_size)
        image_noising_scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2")
        torch.manual_seed(0)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        torch.manual_seed(0)
        text_encoder = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0, eos_token_id=2, hidden_size=embedder_hidden_size, projection_dim=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ))
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32, in_channels=4, out_channels=4, down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"), up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"), block_out_channels=(32, 64), attention_head_dim=(2, 4), class_embed_type="projection", projection_class_embeddings_input_dim=embedder_projection_dim * 2, cross_attention_dim=embedder_hidden_size, layers_per_block=1, upcast_attention=True, use_linear_projection=True, )
        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_schedule="scaled_linear", beta_start=0.00085, beta_end=0.012, prediction_type="v_prediction", set_alpha_to_one=False, steps_offset=1, )
        torch.manual_seed(0)
        vae = AutoencoderKL()
        components = {
            # image encoding components
            "feature_extractor": feature_extractor,
            "image_encoder": image_encoder.eval(),
            # image noising components
            "image_normalizer": image_normalizer.eval(),
            "image_noising_scheduler": image_noising_scheduler,
            # regular denoising components
            "tokenizer": tokenizer,
            "text_encoder": text_encoder.eval(),
            "unet": unet.eval(),
            "scheduler": scheduler,
            "vae": vae.eval(),
        }
        return components
    def get_dummy_inputs(self, device, seed=0, pil_image=True):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        input_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        if pil_image:
            input_image = input_image * 0.5 + 0.5
            input_image = input_image.clamp(0, 1)
            input_image = input_image.cpu().permute(0, 2, 3, 1).float().numpy()
            input_image = DiffusionPipeline.numpy_to_pil(input_image)[0]
return {
"prompt": "An anime racoon running a marathon",
"image": input_image,
"generator": generator,
"num_inference_steps": 2,
"output_type": "np",
}
@skip_mps
    def test_image_embeds_none(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableUnCLIPImg2ImgPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        inputs.update({"image_embeds": None})
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.3872, 0.7224, 0.5601, 0.4741, 0.6872, 0.5814, 0.4636, 0.3867, 0.5078])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device in ["cpu", "mps"]
        self._test_attention_slicing_forward_pass(test_max_difference=test_max_difference)
    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device in ["cpu", "mps"]
        self._test_inference_batch_single_identical(test_max_difference=test_max_difference)
    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(), reason="XFormers attention is only available with CUDA and `xformers` installed", )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_max_difference=False)
@slow
@require_torch_gpu
class StableUnCLIPImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_stable_unclip_l_img2img(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png")
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy")
        pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
            "fusing/stable-unclip-2-1-l-img2img", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()
        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe(input_image, "anime turtle", generator=generator, output_type="np")
        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)
    def test_stable_unclip_h_img2img(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png")
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy")
        pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
            "fusing/stable-unclip-2-1-h-img2img", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()
        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe(input_image, "anime turtle", generator=generator, output_type="np")
        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)
    def test_stable_unclip_img2img_pipeline_with_sequential_cpu_offloading(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png")
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
            "fusing/stable-unclip-2-1-h-img2img", torch_dtype=torch.float16)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()
        _ = pipe(
            input_image, "anime turtle", num_inference_steps=2, output_type="np", )
        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
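# The 7 GB bound is workable because enable_sequential_cpu_offload() keeps only
# the currently executing submodule on the GPU; 7 * 10**9 bytes is about
# 6.5 GiB, comfortably under a V100's 16 GB.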
| 413
| 1
|
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class SpectrogramDiffusionPipeline(metaclass=DummyObject):
    """simple docstring"""
    _backends = ["""transformers""", """torch""", """note_seq"""]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ['''transformers''', '''torch''', '''note_seq'''])
    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ['''transformers''', '''torch''', '''note_seq'''])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ['''transformers''', '''torch''', '''note_seq'''])
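# DummyObject makes every interaction with this placeholder class raise the
# requires_backends error, so a user who calls from_pretrained without
# transformers, torch, and note_seq installed gets an actionable install
# message instead of a bare ImportError at import time.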
| 715
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_lilt": ["LILT_PRETRAINED_CONFIG_ARCHIVE_MAP", "LiltConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_lilt"] = [
"LILT_PRETRAINED_MODEL_ARCHIVE_LIST",
"LiltForQuestionAnswering",
"LiltForSequenceClassification",
"LiltForTokenClassification",
"LiltModel",
"LiltPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_lilt import LILT_PRETRAINED_CONFIG_ARCHIVE_MAP, LiltConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lilt import (
LILT_PRETRAINED_MODEL_ARCHIVE_LIST,
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
LiltPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 528
| 0
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE : Optional[int] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE : Tuple = {'openai-gpt': 'https://huggingface.co/openai-gpt/resolve/main/config.json'}
class OpenAIGPTConfig(PretrainedConfig):
"""simple docstring"""
_a = """openai-gpt"""
_a = {
"""max_position_embeddings""": """n_positions""",
"""hidden_size""": """n_embd""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
    def __init__(self, vocab_size=40478, n_positions=512, n_embd=768, n_layer=12, n_head=12, afn="gelu", resid_pdrop=0.1, embd_pdrop=0.1, attn_pdrop=0.1, layer_norm_epsilon=1E-5, initializer_range=0.02, summary_type="cls_index", summary_use_proj=True, summary_activation=None, summary_proj_to_labels=True, summary_first_dropout=0.1, **kwargs, ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.afn = afn
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_first_dropout = summary_first_dropout
        self.summary_proj_to_labels = summary_proj_to_labels
        super().__init__(**kwargs)
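# Usage sketch (illustrative): thanks to attribute_map, generic code can read
# standardized names from this config:
#   config = OpenAIGPTConfig()
#   config.hidden_size == config.n_embd == 768          # True
#   config.num_hidden_layers == config.n_layer == 12    # True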
| 294
|
import gc
import inspect
import unittest
import torch
from parameterized import parameterized
from diffusers import PriorTransformer
from diffusers.utils import floats_tensor, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin
enable_full_determinism()
class PriorTransformerTests(ModelTesterMixin, unittest.TestCase):
    model_class = PriorTransformer
    main_input_name = """hidden_states"""
    @property
    def dummy_input(self):
        '''simple docstring'''
        batch_size = 4
        embedding_dim = 8
        num_embeddings = 7
        hidden_states = floats_tensor((batch_size, embedding_dim)).to(torch_device)
        proj_embedding = floats_tensor((batch_size, embedding_dim)).to(torch_device)
        encoder_hidden_states = floats_tensor((batch_size, num_embeddings, embedding_dim)).to(torch_device)
        return {
            "hidden_states": hidden_states,
            "timestep": 2,
            "proj_embedding": proj_embedding,
            "encoder_hidden_states": encoder_hidden_states,
        }
    def get_dummy_seed_input(self, seed=0):
        '''simple docstring'''
        torch.manual_seed(seed)
        batch_size = 4
        embedding_dim = 8
        num_embeddings = 7
        hidden_states = torch.randn((batch_size, embedding_dim)).to(torch_device)
        proj_embedding = torch.randn((batch_size, embedding_dim)).to(torch_device)
        encoder_hidden_states = torch.randn((batch_size, num_embeddings, embedding_dim)).to(torch_device)
        return {
            "hidden_states": hidden_states,
            "timestep": 2,
            "proj_embedding": proj_embedding,
            "encoder_hidden_states": encoder_hidden_states,
        }
    @property
    def input_shape(self):
        return (4, 8)
    @property
    def output_shape(self):
        return (4, 8)
    def prepare_init_args_and_inputs_for_common(self):
        '''simple docstring'''
        init_dict = {
            "num_attention_heads": 2,
            "attention_head_dim": 4,
            "num_layers": 2,
            "embedding_dim": 8,
            "num_embeddings": 7,
            "additional_embeddings": 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
    def test_from_pretrained_hub(self):
        '''simple docstring'''
        model, loading_info = PriorTransformer.from_pretrained(
            "hf-internal-testing/prior-dummy", output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)
        model.to(torch_device)
        hidden_states = model(**self.dummy_input)[0]
        assert hidden_states is not None, "Make sure output is not None"
    def test_forward_signature(self):
        '''simple docstring'''
        init_dict, _ = self.prepare_init_args_and_inputs_for_common()
        model = self.model_class(**init_dict)
        signature = inspect.signature(model.forward)
        # signature.parameters is an OrderedDict => so arg_names order is deterministic
        arg_names = [*signature.parameters.keys()]
        expected_arg_names = ["hidden_states", "timestep"]
        self.assertListEqual(arg_names[:2], expected_arg_names)
    def test_output_pretrained(self):
        '''simple docstring'''
        model = PriorTransformer.from_pretrained("hf-internal-testing/prior-dummy")
        model = model.to(torch_device)
        if hasattr(model, "set_default_attn_processor"):
            model.set_default_attn_processor()
        input = self.get_dummy_seed_input()
        with torch.no_grad():
            output = model(**input)[0]
        output_slice = output[0, :5].flatten().cpu()
        print(output_slice)
        # Since the VAE Gaussian prior's generator is seeded on the appropriate device,
        # the expected output slices are not the same for CPU and GPU.
        expected_output_slice = torch.tensor([-1.3436, -0.2870, 0.7538, 0.4368, -0.0239])
        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))
@slow
class PriorTransformerIntegrationTests(unittest.TestCase):
    def get_dummy_seed_input(self, batch_size=1, embedding_dim=768, num_embeddings=77, seed=0):
        '''simple docstring'''
        torch.manual_seed(seed)
        hidden_states = torch.randn((batch_size, embedding_dim)).to(torch_device)
        proj_embedding = torch.randn((batch_size, embedding_dim)).to(torch_device)
        encoder_hidden_states = torch.randn((batch_size, num_embeddings, embedding_dim)).to(torch_device)
        return {
            "hidden_states": hidden_states,
            "timestep": 2,
            "proj_embedding": proj_embedding,
            "encoder_hidden_states": encoder_hidden_states,
        }
    def tearDown(self):
        '''simple docstring'''
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
@parameterized.expand(
[
# fmt: off
[1_3, [-0.5861, 0.1283, -0.0931, 0.0882, 0.4476, 0.1329, -0.0498, 0.0640]],
[3_7, [-0.4913, 0.0110, -0.0483, 0.0541, 0.4954, -0.0170, 0.0354, 0.1651]],
# fmt: on
] )
    def test_kandinsky_prior(self, seed, expected_slice):
        '''simple docstring'''
        model = PriorTransformer.from_pretrained("kandinsky-community/kandinsky-2-1-prior", subfolder="prior")
        model.to(torch_device)
        input = self.get_dummy_seed_input(seed=seed)
        with torch.no_grad():
            sample = model(**input)[0]
        assert list(sample.shape) == [1, 768]
        output_slice = sample[0, :8].flatten().cpu()
        print(output_slice)
        expected_output_slice = torch.tensor(expected_slice)
        assert torch_all_close(output_slice, expected_output_slice, atol=1e-3)
| 371
| 0
|
"""simple docstring"""
import io
import json
import fsspec
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def lowerCAmelCase (__UpperCamelCase : Optional[int] , __UpperCamelCase : List[Any] ):
"""simple docstring"""
assert isinstance(__UpperCamelCase , __UpperCamelCase )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def lowerCAmelCase (__UpperCamelCase : Union[str, Any] , __UpperCamelCase : Any , __UpperCamelCase : List[Any] ):
"""simple docstring"""
__UpperCamelCase =tmp_path / '''cache'''
__UpperCamelCase ={'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
__UpperCamelCase =JsonDatasetReader(__UpperCamelCase , cache_dir=__UpperCamelCase , keep_in_memory=__UpperCamelCase ).read()
_check_json_dataset(__UpperCamelCase , __UpperCamelCase )
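# keep_in_memory=True makes the reader materialize the Arrow table in RAM
# instead of memory-mapping it from the cache directory, which is why the
# memory tracker above expects an increase only in that branch.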
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
] , )
def lowerCAmelCase (__UpperCamelCase : Dict , __UpperCamelCase : List[str] , __UpperCamelCase : Optional[Any] ):
"""simple docstring"""
__UpperCamelCase =tmp_path / '''cache'''
__UpperCamelCase ={'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
__UpperCamelCase =features.copy() if features else default_expected_features
__UpperCamelCase =(
Features({feature: Value(__UpperCamelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
__UpperCamelCase =JsonDatasetReader(__UpperCamelCase , features=__UpperCamelCase , cache_dir=__UpperCamelCase ).read()
_check_json_dataset(__UpperCamelCase , __UpperCamelCase )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_3''': '''float64''', '''col_1''': '''string''', '''col_2''': '''int64'''},
] , )
def lowerCAmelCase (__UpperCamelCase : int , __UpperCamelCase : Any , __UpperCamelCase : Union[str, Any] ):
"""simple docstring"""
__UpperCamelCase =tmp_path / '''cache'''
__UpperCamelCase ={'''col_3''': '''float64''', '''col_1''': '''string''', '''col_2''': '''int64'''}
__UpperCamelCase =features.copy() if features else default_expected_features
__UpperCamelCase =(
Features({feature: Value(__UpperCamelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
__UpperCamelCase =JsonDatasetReader(__UpperCamelCase , features=__UpperCamelCase , cache_dir=__UpperCamelCase ).read()
assert isinstance(__UpperCamelCase , __UpperCamelCase )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_3", "col_1", "col_2"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
def lowerCAmelCase (__UpperCamelCase : Tuple , __UpperCamelCase : List[str] ):
"""simple docstring"""
__UpperCamelCase ={'''col_2''': '''int64''', '''col_3''': '''float64''', '''col_1''': '''string'''}
__UpperCamelCase =features.copy()
__UpperCamelCase =(
Features({feature: Value(__UpperCamelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
__UpperCamelCase =tmp_path / '''cache'''
__UpperCamelCase =JsonDatasetReader(__UpperCamelCase , features=__UpperCamelCase , cache_dir=__UpperCamelCase ).read()
assert isinstance(__UpperCamelCase , __UpperCamelCase )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_2", "col_3", "col_1"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def lowerCAmelCase (__UpperCamelCase : Any , __UpperCamelCase : Dict , __UpperCamelCase : str ):
"""simple docstring"""
__UpperCamelCase =tmp_path / '''cache'''
__UpperCamelCase ={'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
__UpperCamelCase =JsonDatasetReader(__UpperCamelCase , cache_dir=__UpperCamelCase , split=__UpperCamelCase ).read()
_check_json_dataset(__UpperCamelCase , __UpperCamelCase )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize('''path_type''' , [str, list] )
def lowerCAmelCase (__UpperCamelCase : Optional[int] , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Tuple ):
"""simple docstring"""
if issubclass(__UpperCamelCase , __UpperCamelCase ):
__UpperCamelCase =jsonl_path
elif issubclass(__UpperCamelCase , __UpperCamelCase ):
__UpperCamelCase =[jsonl_path]
__UpperCamelCase =tmp_path / '''cache'''
__UpperCamelCase ={'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
__UpperCamelCase =JsonDatasetReader(__UpperCamelCase , cache_dir=__UpperCamelCase ).read()
_check_json_dataset(__UpperCamelCase , __UpperCamelCase )
def lowerCAmelCase (__UpperCamelCase : Dict , __UpperCamelCase : List[Any] , __UpperCamelCase : Dict=("train",) ):
"""simple docstring"""
assert isinstance(__UpperCamelCase , __UpperCamelCase )
for split in splits:
__UpperCamelCase =dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def lowerCAmelCase (__UpperCamelCase : Any , __UpperCamelCase : List[Any] , __UpperCamelCase : Optional[int] ):
"""simple docstring"""
__UpperCamelCase =tmp_path / '''cache'''
__UpperCamelCase ={'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
__UpperCamelCase =JsonDatasetReader({'''train''': jsonl_path} , cache_dir=__UpperCamelCase , keep_in_memory=__UpperCamelCase ).read()
_check_json_datasetdict(__UpperCamelCase , __UpperCamelCase )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
] , )
def lowerCAmelCase (__UpperCamelCase : Any , __UpperCamelCase : List[str] , __UpperCamelCase : List[Any] ):
"""simple docstring"""
__UpperCamelCase =tmp_path / '''cache'''
__UpperCamelCase ={'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
__UpperCamelCase =features.copy() if features else default_expected_features
__UpperCamelCase =(
Features({feature: Value(__UpperCamelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
__UpperCamelCase =JsonDatasetReader({'''train''': jsonl_path} , features=__UpperCamelCase , cache_dir=__UpperCamelCase ).read()
_check_json_datasetdict(__UpperCamelCase , __UpperCamelCase )
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def lowerCAmelCase (__UpperCamelCase : Optional[int] , __UpperCamelCase : Dict , __UpperCamelCase : Optional[Any] ):
"""simple docstring"""
if split:
__UpperCamelCase ={split: jsonl_path}
else:
__UpperCamelCase ='''train'''
__UpperCamelCase ={'''train''': jsonl_path, '''test''': jsonl_path}
__UpperCamelCase =tmp_path / '''cache'''
__UpperCamelCase ={'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
__UpperCamelCase =JsonDatasetReader(__UpperCamelCase , cache_dir=__UpperCamelCase ).read()
_check_json_datasetdict(__UpperCamelCase , __UpperCamelCase , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def lowerCAmelCase (__UpperCamelCase : Dict ):
"""simple docstring"""
return json.load(__UpperCamelCase )
def lowerCAmelCase (__UpperCamelCase : Optional[Any] ):
"""simple docstring"""
return [json.loads(__UpperCamelCase ) for line in buffer]
class _lowercase :
"""simple docstring"""
@pytest.mark.parametrize('''lines, load_json_function''' , [(True, load_json_lines), (False, load_json)] )
    def test_dataset_to_json_lines ( self , lines , load_json_function , dataset ):
        '''simple docstring'''
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset , buffer , lines=lines ).write()
            buffer.seek(0 )
            exported_content = load_json_function(buffer )
        assert isinstance(exported_content , list )
        assert isinstance(exported_content[0] , dict )
        assert len(exported_content ) == 10
@pytest.mark.parametrize(
'''orient, container, keys, len_at''' , [
('''records''', list, {'''tokens''', '''labels''', '''answers''', '''id'''}, None),
('''split''', dict, {'''columns''', '''data'''}, '''data'''),
('''index''', dict, set('''0123456789''' ), None),
('''columns''', dict, {'''tokens''', '''labels''', '''answers''', '''id'''}, '''tokens'''),
('''values''', list, None, None),
('''table''', dict, {'''schema''', '''data'''}, '''data'''),
] , )
    def test_dataset_to_json_orient ( self , orient , container , keys , len_at , dataset ):
        '''simple docstring'''
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset , buffer , lines=False , orient=orient ).write()
            buffer.seek(0 )
            exported_content = load_json(buffer )
        assert isinstance(exported_content , container )
        if keys:
            if container is dict:
                assert exported_content.keys() == keys
            else:
                assert exported_content[0].keys() == keys
        else:
            assert not hasattr(exported_content , '''keys''' ) and not hasattr(exported_content[0] , '''keys''' )
        if len_at:
            assert len(exported_content[len_at] ) == 10
        else:
            assert len(exported_content ) == 10
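    # Aside: the `orient` values exercised here follow pandas' `to_json`
    # conventions: "records" is a list of row dicts, "split" and "table" wrap
    # columns/data (or schema/data) in a dict, "index" and "columns" key by
    # row id or column name, and "values" is a bare list of rows.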
@pytest.mark.parametrize('''lines, load_json_function''' , [(True, load_json_lines), (False, load_json)] )
    def test_dataset_to_json_lines_multiproc ( self , lines , load_json_function , dataset ):
        '''simple docstring'''
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset , buffer , lines=lines , num_proc=2 ).write()
            buffer.seek(0 )
            exported_content = load_json_function(buffer )
        assert isinstance(exported_content , list )
        assert isinstance(exported_content[0] , dict )
        assert len(exported_content ) == 10
@pytest.mark.parametrize(
'''orient, container, keys, len_at''' , [
('''records''', list, {'''tokens''', '''labels''', '''answers''', '''id'''}, None),
('''split''', dict, {'''columns''', '''data'''}, '''data'''),
('''index''', dict, set('''0123456789''' ), None),
('''columns''', dict, {'''tokens''', '''labels''', '''answers''', '''id'''}, '''tokens'''),
('''values''', list, None, None),
('''table''', dict, {'''schema''', '''data'''}, '''data'''),
] , )
    def test_dataset_to_json_orient_multiproc ( self , orient , container , keys , len_at , dataset ):
        '''simple docstring'''
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset , buffer , lines=False , orient=orient , num_proc=2 ).write()
            buffer.seek(0 )
            exported_content = load_json(buffer )
        assert isinstance(exported_content , container )
        if keys:
            if container is dict:
                assert exported_content.keys() == keys
            else:
                assert exported_content[0].keys() == keys
        else:
            assert not hasattr(exported_content , '''keys''' ) and not hasattr(exported_content[0] , '''keys''' )
        if len_at:
            assert len(exported_content[len_at] ) == 10
        else:
            assert len(exported_content ) == 10
    def test_dataset_to_json_invalidproc ( self , dataset ):
        '''simple docstring'''
        with pytest.raises(ValueError ):
            with io.BytesIO() as buffer:
                JsonDatasetWriter(dataset , buffer , num_proc=0 )
@pytest.mark.parametrize('''compression, extension''' , [('''gzip''', '''gz'''), ('''bz2''', '''bz2'''), ('''xz''', '''xz''')] )
    def test_dataset_to_json_compression ( self , shared_datadir , tmp_path_factory , extension , compression , dataset ):
        '''simple docstring'''
        path = str(tmp_path_factory.mktemp('''data''' ) / f"""test.json.{extension}""" )
        original_path = str(shared_datadir / f"""test_file.json.{extension}""" )
        JsonDatasetWriter(dataset , path , compression=compression ).write()
        with fsspec.open(path , '''rb''' , compression='''infer''' ) as f:
            exported_content = f.read()
        with fsspec.open(original_path , '''rb''' , compression='''infer''' ) as f:
            original_content = f.read()
        assert exported_content == original_content
| 296
|
"""simple docstring"""
from math import sqrt
import numpy as np
from sympy import symbols
# Coefficient
# Speed of light (m/s)
c = 299_792_458
# Symbols
ct , x , y , z = symbols('''ct x y z''')
def beta (velocity : float ):
    """simple docstring"""
    if velocity > c:
        raise ValueError('''Speed must not exceed light speed 299,792,458 [m/s]!''' )
    elif velocity < 1:
        # Usually the speed should be much higher than 1 (c order of magnitude)
        raise ValueError('''Speed must be greater than or equal to 1!''' )
    return velocity / c
def gamma (velocity : float ):
    """simple docstring"""
    return 1 / sqrt(1 - beta(velocity ) ** 2 )
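# Quick sanity check (illustrative values): beta(149_896_229) == 0.5, i.e.
# half the speed of light, and gamma there is 1 / sqrt(1 - 0.25) ~= 1.1547.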
def transformation_matrix (velocity : float ):
    """simple docstring"""
    return np.array(
        [
            [gamma(velocity ), -gamma(velocity ) * beta(velocity ), 0, 0],
            [-gamma(velocity ) * beta(velocity ), gamma(velocity ), 0, 0],
            [0, 0, 1, 0],
            [0, 0, 0, 1],
        ] )
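# The matrix above is the standard Lorentz boost along x: only the (ct, x)
# block mixes, with entries gamma and -gamma * beta, while y and z pass through.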
def transform (velocity : float , event : np.ndarray | None = None ):
    """simple docstring"""
    if event is None:
        event = np.array([ct, x, y, z] )  # Symbolic four vector
    else:
        event[0] *= c  # x0 is ct (speed of light * time)
    return transformation_matrix(velocity ) @ event
if __name__ == "__main__":
import doctest
doctest.testmod()
# Example of symbolic vector:
four_vector = transform(29_979_245)
print('''Example of four vector: ''')
print(f'''ct\' = {four_vector[0]}''')
print(f'''x\' = {four_vector[1]}''')
print(f'''y\' = {four_vector[2]}''')
print(f'''z\' = {four_vector[3]}''')
# Substitute symbols with numerical values
sub_dict = {ct: c, x: 1, y: 1, z: 1}
numerical_vector = [four_vector[i].subs(sub_dict) for i in range(4)]
print(f'''\n{numerical_vector}''')
| 296
| 1
|
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.text import TextDatasetReader
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_text_dataset (dataset, expected_features ):
    assert isinstance(dataset, Dataset )
    assert dataset.num_rows == 4
    assert dataset.num_columns == 1
    assert dataset.column_names == ["text"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('keep_in_memory', [False, True] )
def test_dataset_from_text_keep_in_memory (keep_in_memory, text_path, tmp_path ):
    cache_dir = tmp_path / """cache"""
    expected_features = {"""text""": """string"""}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = TextDatasetReader(text_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory ).read()
    _check_text_dataset(dataset, expected_features )
@pytest.mark.parametrize(
'features', [
None,
{'text': 'string'},
{'text': 'int32'},
{'text': 'float32'},
], )
def test_dataset_from_text_features (features, text_path, tmp_path ):
    cache_dir = tmp_path / """cache"""
    default_expected_features = {"""text""": """string"""}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype ) for feature, dtype in features.items()} ) if features is not None else None
    )
    dataset = TextDatasetReader(text_path, features=features, cache_dir=cache_dir ).read()
    _check_text_dataset(dataset, expected_features )
@pytest.mark.parametrize('split', [None, NamedSplit('train' ), 'train', 'test'] )
def test_dataset_from_text_split (split, text_path, tmp_path ):
    cache_dir = tmp_path / """cache"""
    expected_features = {"""text""": """string"""}
    dataset = TextDatasetReader(text_path, cache_dir=cache_dir, split=split ).read()
    _check_text_dataset(dataset, expected_features )
    assert dataset.split == split if split else "train"
@pytest.mark.parametrize('path_type', [str, list] )
def test_dataset_from_text_path_type (path_type, text_path, tmp_path ):
    if issubclass(path_type, str ):
        path = text_path
    elif issubclass(path_type, list ):
        path = [text_path]
    cache_dir = tmp_path / """cache"""
    expected_features = {"""text""": """string"""}
    dataset = TextDatasetReader(path, cache_dir=cache_dir ).read()
    _check_text_dataset(dataset, expected_features )
def _check_text_datasetdict (dataset_dict, expected_features, splits=("train",) ):
    assert isinstance(dataset_dict, DatasetDict )
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 1
        assert dataset.column_names == ["text"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('keep_in_memory', [False, True] )
def test_datasetdict_from_text_keep_in_memory (keep_in_memory, text_path, tmp_path ):
    cache_dir = tmp_path / """cache"""
    expected_features = {"""text""": """string"""}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = TextDatasetReader({'train': text_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory ).read()
    _check_text_datasetdict(dataset, expected_features )
@pytest.mark.parametrize(
'features', [
None,
{'text': 'string'},
{'text': 'int32'},
{'text': 'float32'},
], )
def test_datasetdict_from_text_features (features, text_path, tmp_path ):
    cache_dir = tmp_path / """cache"""
    # Text files always load as a single "text" column of strings by default.
    default_expected_features = {"""text""": """string"""}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype ) for feature, dtype in features.items()} ) if features is not None else None
    )
    dataset = TextDatasetReader({'train': text_path}, features=features, cache_dir=cache_dir ).read()
    _check_text_datasetdict(dataset, expected_features )
@pytest.mark.parametrize('split', [None, NamedSplit('train' ), 'train', 'test'] )
def test_datasetdict_from_text_split (split, text_path, tmp_path ):
    if split:
        path = {split: text_path}
    else:
        split = """train"""
        path = {"""train""": text_path, """test""": text_path}
    cache_dir = tmp_path / """cache"""
    expected_features = {"""text""": """string"""}
    dataset = TextDatasetReader(path, cache_dir=cache_dir ).read()
    _check_text_datasetdict(dataset, expected_features, splits=list(path.keys() ) )
    assert all(dataset[split].split == split for split in path.keys() )
| 699
|
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
from ...utils.constants import SAGEMAKER_PARALLEL_EC2_INSTANCES, TORCH_DYNAMO_MODES
from ...utils.dataclasses import ComputeEnvironment, SageMakerDistributedType
from ...utils.imports import is_botoa_available
from .config_args import SageMakerConfig
from .config_utils import (
DYNAMO_BACKENDS,
_ask_field,
_ask_options,
_convert_dynamo_backend,
_convert_mixed_precision,
_convert_sagemaker_distributed_mode,
_convert_yes_no_to_bool,
)
if is_botoa_available():
import botoa # noqa: F401
def UpperCamelCase__ ( __magic_name__ : Optional[Any] ) -> Dict:
'''simple docstring'''
snake_case__ : int = botoa.client("""iam""" )
snake_case__ : Union[str, Any] = {
"""Version""": """2012-10-17""",
"""Statement""": [
{"""Effect""": """Allow""", """Principal""": {"""Service""": """sagemaker.amazonaws.com"""}, """Action""": """sts:AssumeRole"""}
],
}
try:
# create the role, associated with the chosen trust policy
iam_client.create_role(
RoleName=__magic_name__ , AssumeRolePolicyDocument=json.dumps(__magic_name__ , indent=2 ) )
snake_case__ : Dict = {
"""Version""": """2012-10-17""",
"""Statement""": [
{
"""Effect""": """Allow""",
"""Action""": [
"""sagemaker:*""",
"""ecr:GetDownloadUrlForLayer""",
"""ecr:BatchGetImage""",
"""ecr:BatchCheckLayerAvailability""",
"""ecr:GetAuthorizationToken""",
"""cloudwatch:PutMetricData""",
"""cloudwatch:GetMetricData""",
"""cloudwatch:GetMetricStatistics""",
"""cloudwatch:ListMetrics""",
"""logs:CreateLogGroup""",
"""logs:CreateLogStream""",
"""logs:DescribeLogStreams""",
"""logs:PutLogEvents""",
"""logs:GetLogEvents""",
"""s3:CreateBucket""",
"""s3:ListBucket""",
"""s3:GetBucketLocation""",
"""s3:GetObject""",
"""s3:PutObject""",
],
"""Resource""": """*""",
}
],
}
# attach policy to role
iam_client.put_role_policy(
RoleName=__magic_name__ , PolicyName=f"{role_name}_policy_permission" , PolicyDocument=json.dumps(__magic_name__ , indent=2 ) , )
except iam_client.exceptions.EntityAlreadyExistsException:
print(f"role {role_name} already exists. Using existing one" )
def UpperCamelCase__ ( __magic_name__ : Any ) -> Tuple:
'''simple docstring'''
snake_case__ : List[str] = botoa.client("""iam""" )
return iam_client.get_role(RoleName=__magic_name__ )["Role"]["Arn"]
def UpperCamelCase__ ( ) -> Tuple:
'''simple docstring'''
snake_case__ : Union[str, Any] = _ask_options(
"""How do you want to authorize?""" , ["""AWS Profile""", """Credentials (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY) """] , __magic_name__ , )
snake_case__ : List[Any] = None
if credentials_configuration == 0:
snake_case__ : Dict = _ask_field("""Enter your AWS Profile name: [default] """ , default="""default""" )
snake_case__ : List[str] = aws_profile
else:
print(
"""Note you will need to provide AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY when you launch you training script with,"""
"""`accelerate launch --aws_access_key_id XXX --aws_secret_access_key YYY`""" )
snake_case__ : List[str] = _ask_field("""AWS Access Key ID: """ )
snake_case__ : int = aws_access_key_id
snake_case__ : Optional[Any] = _ask_field("""AWS Secret Access Key: """ )
snake_case__ : List[str] = aws_secret_access_key
snake_case__ : Tuple = _ask_field("""Enter your AWS Region: [us-east-1]""" , default="""us-east-1""" )
snake_case__ : Optional[int] = aws_region
snake_case__ : int = _ask_options(
"""Do you already have an IAM Role for executing Amazon SageMaker Training Jobs?""" , ["""Provide IAM Role name""", """Create new IAM role using credentials"""] , __magic_name__ , )
if role_management == 0:
snake_case__ : Optional[Any] = _ask_field("""Enter your IAM role name: """ )
else:
snake_case__ : Optional[int] = """accelerate_sagemaker_execution_role"""
print(f"Accelerate will create an iam role \"{iam_role_name}\" using the provided credentials" )
_create_iam_role_for_sagemaker(__magic_name__ )
snake_case__ : Dict = _ask_field(
"""Do you want to use custom Docker image? [yes/NO]: """ , _convert_yes_no_to_bool , default=__magic_name__ , error_message="""Please enter yes or no.""" , )
snake_case__ : Any = None
if is_custom_docker_image:
snake_case__ : str = _ask_field("""Enter your Docker image: """ , lambda __magic_name__ : str(__magic_name__ ).lower() )
snake_case__ : Tuple = _ask_field(
"""Do you want to provide SageMaker input channels with data locations? [yes/NO]: """ , _convert_yes_no_to_bool , default=__magic_name__ , error_message="""Please enter yes or no.""" , )
snake_case__ : List[Any] = None
if is_sagemaker_inputs_enabled:
snake_case__ : str = _ask_field(
"""Enter the path to the SageMaker inputs TSV file with columns (channel_name, data_location): """ , lambda __magic_name__ : str(__magic_name__ ).lower() , )
snake_case__ : Optional[int] = _ask_field(
"""Do you want to enable SageMaker metrics? [yes/NO]: """ , _convert_yes_no_to_bool , default=__magic_name__ , error_message="""Please enter yes or no.""" , )
snake_case__ : Optional[Any] = None
if is_sagemaker_metrics_enabled:
snake_case__ : List[Any] = _ask_field(
"""Enter the path to the SageMaker metrics TSV file with columns (metric_name, metric_regex): """ , lambda __magic_name__ : str(__magic_name__ ).lower() , )
snake_case__ : Tuple = _ask_options(
"""What is the distributed mode?""" , ["""No distributed training""", """Data parallelism"""] , _convert_sagemaker_distributed_mode , )
snake_case__ : Any = {}
snake_case__ : List[Any] = _ask_field(
"""Do you wish to optimize your script with torch dynamo?[yes/NO]:""" , _convert_yes_no_to_bool , default=__magic_name__ , error_message="""Please enter yes or no.""" , )
if use_dynamo:
snake_case__ : str = """dynamo_"""
snake_case__ : Tuple = _ask_options(
"""Which dynamo backend would you like to use?""" , [x.lower() for x in DYNAMO_BACKENDS] , _convert_dynamo_backend , default=2 , )
snake_case__ : List[str] = _ask_field(
"""Do you want to customize the defaults sent to torch.compile? [yes/NO]: """ , _convert_yes_no_to_bool , default=__magic_name__ , error_message="""Please enter yes or no.""" , )
if use_custom_options:
snake_case__ : str = _ask_options(
"""Which mode do you want to use?""" , __magic_name__ , lambda __magic_name__ : TORCH_DYNAMO_MODES[int(__magic_name__ )] , default="""default""" , )
snake_case__ : Union[str, Any] = _ask_field(
"""Do you want the fullgraph mode or it is ok to break model into several subgraphs? [yes/NO]: """ , _convert_yes_no_to_bool , default=__magic_name__ , error_message="""Please enter yes or no.""" , )
snake_case__ : str = _ask_field(
"""Do you want to enable dynamic shape tracing? [yes/NO]: """ , _convert_yes_no_to_bool , default=__magic_name__ , error_message="""Please enter yes or no.""" , )
snake_case__ : Dict = """Which EC2 instance type you want to use for your training?"""
if distributed_type != SageMakerDistributedType.NO:
snake_case__ : List[str] = _ask_options(
__magic_name__ , __magic_name__ , lambda __magic_name__ : SAGEMAKER_PARALLEL_EC2_INSTANCES[int(__magic_name__ )] )
else:
eca_instance_query += "? [ml.p3.2xlarge]:"
snake_case__ : Optional[int] = _ask_field(__magic_name__ , lambda __magic_name__ : str(__magic_name__ ).lower() , default="""ml.p3.2xlarge""" )
snake_case__ : Dict = 1
if distributed_type in (SageMakerDistributedType.DATA_PARALLEL, SageMakerDistributedType.MODEL_PARALLEL):
snake_case__ : Optional[Any] = _ask_field(
"""How many machines do you want use? [1]: """ , __magic_name__ , default=1 , )
snake_case__ : Union[str, Any] = _ask_options(
"""Do you wish to use FP16 or BF16 (mixed precision)?""" , ["""no""", """fp16""", """bf16""", """fp8"""] , _convert_mixed_precision , )
if use_dynamo and mixed_precision == "no":
print(
"""Torch dynamo used without mixed precision requires TF32 to be efficient. Accelerate will enable it by default when launching your scripts.""" )
return SageMakerConfig(
image_uri=__magic_name__ , compute_environment=ComputeEnvironment.AMAZON_SAGEMAKER , distributed_type=__magic_name__ , use_cpu=__magic_name__ , dynamo_config=__magic_name__ , eca_instance_type=__magic_name__ , profile=__magic_name__ , region=__magic_name__ , iam_role_name=__magic_name__ , mixed_precision=__magic_name__ , num_machines=__magic_name__ , sagemaker_inputs_file=__magic_name__ , sagemaker_metrics_file=__magic_name__ , )
| 38
| 0
|
import numpy
class TwoHiddenLayerNeuralNetwork :
"""simple docstring"""
    def __init__( self , input_array , output_array ) -> None:
        self.input_array = input_array
        # Random initial weights are assigned where first argument is the
        # number of nodes in previous layer and second argument is the
        # number of nodes in the next layer.
        # Random initial weights are assigned.
        # self.input_array.shape[1] is used to represent number of nodes in input layer.
        # First hidden layer consists of 4 nodes.
        self.input_layer_and_first_hidden_layer_weights = numpy.random.rand(
            self.input_array.shape[1] , 4 )
        # Random initial values for the first hidden layer.
        # First hidden layer has 4 nodes.
        # Second hidden layer has 3 nodes.
        self.first_hidden_layer_and_second_hidden_layer_weights = numpy.random.rand(
            4 , 3 )
        # Random initial values for the second hidden layer.
        # Second hidden layer has 3 nodes.
        # Output layer has 1 node.
        self.second_hidden_layer_and_output_layer_weights = numpy.random.rand(3 , 1 )
        # Real output values provided.
        self.output_array = output_array
        # Predicted output values by the neural network.
        # Predicted_output array initially consists of zeroes.
        self.predicted_output = numpy.zeros(output_array.shape )
    def feedforward( self ) -> numpy.ndarray:
        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.input_array , self.input_layer_and_first_hidden_layer_weights ) )
        # layer_between_first_hidden_layer_and_second_hidden_layer is the layer
        # connecting the first hidden set of nodes with the second hidden set of nodes.
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) )
        # layer_between_second_hidden_layer_and_output is the layer connecting
        # second hidden layer with the output node.
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) )
        return self.layer_between_second_hidden_layer_and_output
    def back_propagation( self ) -> None:
        # Gradient for the second-hidden-to-output weights.
        updated_second_hidden_layer_and_output_layer_weights = numpy.dot(
            self.layer_between_first_hidden_layer_and_second_hidden_layer.T , 2
            * (self.output_array - self.predicted_output)
            * sigmoid_derivative(self.predicted_output ) , )
        # Gradient for the first-hidden-to-second-hidden weights.
        updated_first_hidden_layer_and_second_hidden_layer_weights = numpy.dot(
            self.layer_between_input_and_first_hidden_layer.T , numpy.dot(
                2
                * (self.output_array - self.predicted_output)
                * sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
            * sigmoid_derivative(
                self.layer_between_first_hidden_layer_and_second_hidden_layer ) , )
        # Gradient for the input-to-first-hidden weights.
        updated_input_layer_and_first_hidden_layer_weights = numpy.dot(
            self.input_array.T , numpy.dot(
                numpy.dot(
                    2
                    * (self.output_array - self.predicted_output)
                    * sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
                * sigmoid_derivative(
                    self.layer_between_first_hidden_layer_and_second_hidden_layer ) , self.first_hidden_layer_and_second_hidden_layer_weights.T , )
            * sigmoid_derivative(self.layer_between_input_and_first_hidden_layer ) , )
self.input_layer_and_first_hidden_layer_weights += (
updated_input_layer_and_first_hidden_layer_weights
)
self.first_hidden_layer_and_second_hidden_layer_weights += (
updated_first_hidden_layer_and_second_hidden_layer_weights
)
self.second_hidden_layer_and_output_layer_weights += (
updated_second_hidden_layer_and_output_layer_weights
)
    def train( self , output , iterations , give_loss ) -> None:
        for iteration in range(1 , iterations + 1 ):
            self.predicted_output = self.feedforward()
            self.back_propagation()
            if give_loss:
                loss = numpy.mean(numpy.square(output - self.feedforward() ) )
                print(f'Iteration {iteration} Loss: {loss}' )
    def predict( self , input_arr ) -> int:
        self.array = input_arr
        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.array , self.input_layer_and_first_hidden_layer_weights ) )
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) )
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) )
        return int(self.layer_between_second_hidden_layer_and_output > 0.6 )
def sigmoid (value ):
    return 1 / (1 + numpy.exp(-value ))
def sigmoid_derivative (value ):
    return (value) * (1 - (value))
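# Note: sigmoid_derivative expects an already-sigmoided value, i.e. it computes
# s * (1 - s); for example, when the activation s is 0.5 the slope is 0.25.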
def example ():
    test_input = numpy.array(
        (
            [0, 0, 0],
            [0, 0, 1],
            [0, 1, 0],
            [0, 1, 1],
            [1, 0, 0],
            [1, 0, 1],
            [1, 1, 0],
            [1, 1, 1],
        ) , dtype=numpy.float64 , )
    # True output values for the given input values.
    output = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]) , dtype=numpy.float64 )
    # Calling neural network class.
    neural_network = TwoHiddenLayerNeuralNetwork(
        input_array=test_input , output_array=output )
    # Calling training function.
    # Set give_loss to True if you want to see loss in every iteration.
    neural_network.train(output=output , iterations=10 , give_loss=False )
    return neural_network.predict(numpy.array(([1, 1, 1]) , dtype=numpy.float64 ) )
if __name__ == "__main__":
example()
| 423
|
def greatest_common_divisor (x , y ):
    return x if y == 0 else greatest_common_divisor(y , x % y )
def lcm (x , y ):
    return (x * y) // greatest_common_divisor(x , y )
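# Example: lcm(4, 6) == (4 * 6) // gcd(4, 6) == 24 // 2 == 12.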
def solution (n = 20 ):
    g = 1
    for i in range(1 , n + 1 ):
        g = lcm(g , i )
    return g
if __name__ == "__main__":
print(F"{solution() = }")
| 423
| 1
|
import argparse
import json
import logging
import os
import shutil
import sys
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.utils import write_basic_config
from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device
from transformers.utils import is_apex_available
logging.basicConfig(level=logging.DEBUG)
_snake_case : Optional[Any] = logging.getLogger()
def a_ ( ):
    parser = argparse.ArgumentParser()
    parser.add_argument('-f' )
    args = parser.parse_args()
    return args.f
def get_results ( output_dir ):
    results = {}
    path = os.path.join(output_dir, 'all_results.json' )
    if os.path.exists(path ):
        with open(path, 'r' ) as f:
            results = json.load(f )
    else:
        raise ValueError(f"can't find {path}" )
    return results
def is_cuda_and_apex_available ( ):
    is_using_cuda = torch.cuda.is_available() and torch_device == 'cuda'
    return is_using_cuda and is_apex_available()
_snake_case : Union[str, Any] = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class _UpperCAmelCase ( _UpperCAmelCase ):
"""simple docstring"""
@classmethod
def lowercase ( cls : int ) -> Union[str, Any]:
__lowerCAmelCase = tempfile.mkdtemp()
__lowerCAmelCase = os.path.join(cls.tmpdir , 'default_config.yml' )
write_basic_config(save_location=cls.configPath )
__lowerCAmelCase = ['accelerate', 'launch', '--config_file', cls.configPath]
@classmethod
def lowercase ( cls : str ) -> List[Any]:
shutil.rmtree(cls.tmpdir )
@mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} )
def lowercase ( self : Dict ) -> Tuple:
__lowerCAmelCase = self.get_auto_remove_tmp_dir()
__lowerCAmelCase = f"""
{self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--seed=42
--checkpointing_steps epoch
--with_tracking
""".split()
if is_cuda_and_apex_available():
testargs.append('--fp16' )
run_command(self._launch_args + testargs )
__lowerCAmelCase = get_results(lowerCAmelCase_ )
self.assertGreaterEqual(result['eval_accuracy'] , 0.75 )
self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase_ , 'epoch_0' ) ) )
self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase_ , 'glue_no_trainer' ) ) )
@mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} )
def lowercase ( self : Optional[int] ) -> Optional[int]:
__lowerCAmelCase = self.get_auto_remove_tmp_dir()
__lowerCAmelCase = f"""
{self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py
--model_name_or_path distilgpt2
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--block_size 128
--per_device_train_batch_size 5
--per_device_eval_batch_size 5
--num_train_epochs 2
--output_dir {tmp_dir}
--checkpointing_steps epoch
--with_tracking
""".split()
if torch.cuda.device_count() > 1:
# Skipping because there are not enough batches to train the model + would need a drop_last to work.
return
run_command(self._launch_args + testargs )
__lowerCAmelCase = get_results(lowerCAmelCase_ )
self.assertLess(result['perplexity'] , 1_0_0 )
self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase_ , 'epoch_0' ) ) )
self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase_ , 'clm_no_trainer' ) ) )
@mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} )
def lowercase ( self : Optional[int] ) -> Optional[int]:
__lowerCAmelCase = self.get_auto_remove_tmp_dir()
__lowerCAmelCase = f"""
{self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py
--model_name_or_path distilroberta-base
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--output_dir {tmp_dir}
--num_train_epochs=1
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
__lowerCAmelCase = get_results(lowerCAmelCase_ )
self.assertLess(result['perplexity'] , 4_2 )
self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase_ , 'epoch_0' ) ) )
self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase_ , 'mlm_no_trainer' ) ) )
@mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} )
def lowercase ( self : Dict ) -> List[str]:
__lowerCAmelCase = 7 if get_gpu_count() > 1 else 2
__lowerCAmelCase = self.get_auto_remove_tmp_dir()
__lowerCAmelCase = f"""
{self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/conll/sample.json
--validation_file tests/fixtures/tests_samples/conll/sample.json
--output_dir {tmp_dir}
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=2
--num_train_epochs={epochs}
--seed 7
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
__lowerCAmelCase = get_results(lowerCAmelCase_ )
self.assertGreaterEqual(result['eval_accuracy'] , 0.75 )
self.assertLess(result['train_loss'] , 0.5 )
self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase_ , 'epoch_0' ) ) )
self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase_ , 'ner_no_trainer' ) ) )
@unittest.skip(reason='Fix me @muellerzr' )
@mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} )
def lowercase ( self : List[Any] ) -> int:
__lowerCAmelCase = self.get_auto_remove_tmp_dir()
__lowerCAmelCase = f"""
{self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py
--model_name_or_path bert-base-uncased
--version_2_with_negative
--train_file tests/fixtures/tests_samples/SQUAD/sample.json
--validation_file tests/fixtures/tests_samples/SQUAD/sample.json
--output_dir {tmp_dir}
--seed=42
--max_train_steps=10
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
__lowerCAmelCase = get_results(lowerCAmelCase_ )
# Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics.
self.assertGreaterEqual(result['eval_f1'] , 2_8 )
self.assertGreaterEqual(result['eval_exact'] , 2_8 )
self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase_ , 'epoch_0' ) ) )
self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase_ , 'qa_no_trainer' ) ) )
@mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} )
def lowercase ( self : Optional[Any] ) -> str:
__lowerCAmelCase = self.get_auto_remove_tmp_dir()
__lowerCAmelCase = f"""
{self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/swag/sample.json
--validation_file tests/fixtures/tests_samples/swag/sample.json
--output_dir {tmp_dir}
--max_train_steps=20
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--with_tracking
""".split()
run_command(self._launch_args + testargs )
__lowerCAmelCase = get_results(lowerCAmelCase_ )
self.assertGreaterEqual(result['eval_accuracy'] , 0.8 )
self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase_ , 'swag_no_trainer' ) ) )
@slow
@mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} )
def lowercase ( self : Optional[int] ) -> str:
__lowerCAmelCase = self.get_auto_remove_tmp_dir()
__lowerCAmelCase = f"""
{self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py
--model_name_or_path t5-small
--train_file tests/fixtures/tests_samples/xsum/sample.json
--validation_file tests/fixtures/tests_samples/xsum/sample.json
--output_dir {tmp_dir}
--max_train_steps=50
--num_warmup_steps=8
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
__lowerCAmelCase = get_results(lowerCAmelCase_ )
self.assertGreaterEqual(result['eval_rouge1'] , 1_0 )
self.assertGreaterEqual(result['eval_rouge2'] , 2 )
self.assertGreaterEqual(result['eval_rougeL'] , 7 )
self.assertGreaterEqual(result['eval_rougeLsum'] , 7 )
self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase_ , 'epoch_0' ) ) )
self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase_ , 'summarization_no_trainer' ) ) )
@slow
@mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} )
def lowercase ( self : Union[str, Any] ) -> str:
__lowerCAmelCase = self.get_auto_remove_tmp_dir()
__lowerCAmelCase = f"""
{self.examples_dir}/pytorch/translation/run_translation_no_trainer.py
--model_name_or_path sshleifer/student_marian_en_ro_6_1
--source_lang en
--target_lang ro
--train_file tests/fixtures/tests_samples/wmt16/sample.json
--validation_file tests/fixtures/tests_samples/wmt16/sample.json
--output_dir {tmp_dir}
--max_train_steps=50
--num_warmup_steps=8
--num_beams=6
--learning_rate=3e-3
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--source_lang en_XX
--target_lang ro_RO
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
__lowerCAmelCase = get_results(lowerCAmelCase_ )
self.assertGreaterEqual(result['eval_bleu'] , 3_0 )
self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase_ , 'epoch_0' ) ) )
self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase_ , 'translation_no_trainer' ) ) )
@slow
def lowercase ( self : Tuple ) -> Optional[int]:
__lowerCAmelCase = logging.StreamHandler(sys.stdout )
logger.addHandler(lowerCAmelCase_ )
__lowerCAmelCase = self.get_auto_remove_tmp_dir()
__lowerCAmelCase = f"""
{self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py
--dataset_name huggingface/semantic-segmentation-test-sample
--output_dir {tmp_dir}
--max_train_steps=10
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
""".split()
run_command(self._launch_args + testargs )
__lowerCAmelCase = get_results(lowerCAmelCase_ )
self.assertGreaterEqual(result['eval_overall_accuracy'] , 0.10 )
@mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} )
def lowercase ( self : Optional[Any] ) -> List[Any]:
__lowerCAmelCase = self.get_auto_remove_tmp_dir()
__lowerCAmelCase = f"""
{self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py
--model_name_or_path google/vit-base-patch16-224-in21k
--dataset_name hf-internal-testing/cats_vs_dogs_sample
--learning_rate 1e-4
--per_device_train_batch_size 2
--per_device_eval_batch_size 1
--max_train_steps 2
--train_val_split 0.1
--seed 42
--output_dir {tmp_dir}
--with_tracking
--checkpointing_steps 1
""".split()
if is_cuda_and_apex_available():
testargs.append('--fp16' )
run_command(self._launch_args + testargs )
__lowerCAmelCase = get_results(lowerCAmelCase_ )
# The base model scores a 25%
self.assertGreaterEqual(result['eval_accuracy'] , 0.6 )
self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase_ , 'step_1' ) ) )
self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase_ , 'image_classification_no_trainer' ) ) )
| 53
|
class SubArray :
    """simple docstring"""

    def __init__( self , arr ):
        '''simple docstring'''
        self.array = arr.split(""",""" )

    def solve_sub_array ( self ):
        '''simple docstring'''
        # Kadane-style scan: sum_value[i] holds the best sum of a sub-array
        # ending exactly at i; rear[i] holds the best sum seen anywhere up to i.
        sum_value = [int(self.array[0] )] * len(self.array )
        rear = [int(self.array[0] )] * len(self.array )
        for i in range(1 , len(self.array ) ):
            sum_value[i] = max(
                int(self.array[i] ) + sum_value[i - 1] , int(self.array[i] ) )
            rear[i] = max(sum_value[i] , rear[i - 1] )
        return rear[len(self.array ) - 1]
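# Example: SubArray("1,-2,3,4,-1").solve_sub_array() == 7 (the run 3 + 4).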
if __name__ == "__main__":
    whole_array = input('please input some numbers:')
    array = SubArray(whole_array)
    re = array.solve_sub_array()
    print(('the results is:', re))
| 398
| 0
|
"""simple docstring"""
from binascii import hexlify
from hashlib import sha256
from os import urandom
# RFC 3526 - More Modular Exponential (MODP) Diffie-Hellman groups for
# Internet Key Exchange (IKE) https://tools.ietf.org/html/rfc3526
primes = {
# 1536-bit
5: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
+ '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
+ 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
+ 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
+ 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
+ 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
+ '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF',
base=16,
),
'generator': 2,
},
# 2048-bit
14: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
+ '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
+ 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
+ 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
+ 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
+ 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
+ '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'
+ 'E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'
+ 'DE2BCBF6955817183995497CEA956AE515D2261898FA0510'
+ '15728E5A8AACAA68FFFFFFFFFFFFFFFF',
base=16,
),
'generator': 2,
},
# 3072-bit
15: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
+ '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
+ 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
+ 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
+ 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
+ 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
+ '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'
+ 'E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'
+ 'DE2BCBF6955817183995497CEA956AE515D2261898FA0510'
+ '15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'
+ 'ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'
+ 'ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'
+ 'F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'
+ 'BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'
+ '43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF',
base=16,
),
'generator': 2,
},
# 4096-bit
16: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
+ '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
+ 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
+ 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
+ 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
+ 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
+ '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'
+ 'E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'
+ 'DE2BCBF6955817183995497CEA956AE515D2261898FA0510'
+ '15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'
+ 'ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'
+ 'ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'
+ 'F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'
+ 'BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'
+ '43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7'
+ '88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA'
+ '2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6'
+ '287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED'
+ '1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9'
+ '93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199'
+ 'FFFFFFFFFFFFFFFF',
base=16,
),
'generator': 2,
},
# 6144-bit
17: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08'
+ '8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B'
+ '302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9'
+ 'A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6'
+ '49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8'
+ 'FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C'
+ '180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718'
+ '3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D'
+ '04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D'
+ 'B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226'
+ '1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C'
+ 'BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC'
+ 'E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26'
+ '99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB'
+ '04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2'
+ '233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127'
+ 'D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492'
+ '36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406'
+ 'AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918'
+ 'DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151'
+ '2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03'
+ 'F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F'
+ 'BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA'
+ 'CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B'
+ 'B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632'
+ '387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E'
+ '6DCC4024FFFFFFFFFFFFFFFF',
base=16,
),
'generator': 2,
},
# 8192-bit
18: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
+ '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
+ 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
+ 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
+ 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
+ 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
+ '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'
+ 'E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'
+ 'DE2BCBF6955817183995497CEA956AE515D2261898FA0510'
+ '15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'
+ 'ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'
+ 'ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'
+ 'F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'
+ 'BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'
+ '43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7'
+ '88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA'
+ '2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6'
+ '287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED'
+ '1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9'
+ '93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492'
+ '36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD'
+ 'F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831'
+ '179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B'
+ 'DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF'
+ '5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6'
+ 'D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3'
+ '23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA'
+ 'CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328'
+ '06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C'
+ 'DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE'
+ '12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4'
+ '38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300'
+ '741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568'
+ '3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9'
+ '22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B'
+ '4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A'
+ '062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36'
+ '4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1'
+ 'B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92'
+ '4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47'
+ '9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71'
+ '60C980DD98EDD3DFFFFFFFFFFFFFFFFF',
base=16,
),
'generator': 2,
},
}
class DiffieHellman :
    """simple docstring"""

    def __init__( self , group: int = 14 ):
        '''simple docstring'''
        if group not in primes:
            raise ValueError('''Unsupported Group''' )
        self.prime = primes[group]['''prime''']
        self.generator = primes[group]['''generator''']
        # A random 256-bit private key.
        self.__private_key = int(hexlify(urandom(32 ) ) , base=16 )

    def get_private_key( self ):
        '''simple docstring'''
        return hex(self.__private_key )[2:]

    def generate_public_key( self ):
        '''simple docstring'''
        public_key = pow(self.generator , self.__private_key , self.prime )
        return hex(public_key )[2:]

    def is_valid_public_key( self , key: int ):
        '''simple docstring'''
        # Valid keys lie in [2, p - 2] and pass the Legendre-symbol check.
        return (
            2 <= key <= self.prime - 2
            and pow(key , (self.prime - 1) // 2 , self.prime ) == 1
        )

    def generate_shared_key( self , other_key_str: str ):
        '''simple docstring'''
        other_key = int(other_key_str , base=16 )
        if not self.is_valid_public_key(other_key ):
            raise ValueError('''Invalid public key''' )
        shared_key = pow(other_key , self.__private_key , self.prime )
        return sha256(str(shared_key ).encode() ).hexdigest()

    @staticmethod
    def is_valid_public_key_static( remote_public_key: int , prime: int ):
        '''simple docstring'''
        return (
            2 <= remote_public_key <= prime - 2
            and pow(remote_public_key , (prime - 1) // 2 , prime ) == 1
        )

    @staticmethod
    def generate_shared_key_static( local_private_key_str: str , remote_public_key_str: str , group: int = 14 ):
        '''simple docstring'''
        local_private_key = int(local_private_key_str , base=16 )
        remote_public_key = int(remote_public_key_str , base=16 )
        prime = primes[group]['''prime''']
        if not DiffieHellman.is_valid_public_key_static(remote_public_key , prime ):
            raise ValueError('''Invalid public key''' )
        shared_key = pow(remote_public_key , local_private_key , prime )
        return sha256(str(shared_key ).encode() ).hexdigest()
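# Minimal round-trip sketch (default 2048-bit MODP group):
#     alice, bob = DiffieHellman(), DiffieHellman()
#     key_a = alice.generate_shared_key(bob.generate_public_key())
#     key_b = bob.generate_shared_key(alice.generate_public_key())
#     assert key_a == key_b  # both sides derive the same SHA-256 digest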
if __name__ == "__main__":
import doctest
doctest.testmod()
| 200
|
"""simple docstring"""
__a : List[Any] = frozenset(
[
'prompt',
'height',
'width',
'guidance_scale',
'negative_prompt',
'prompt_embeds',
'negative_prompt_embeds',
'cross_attention_kwargs',
]
)
__a : Union[str, Any] = frozenset(['prompt', 'negative_prompt'])
__a : Any = frozenset([])
__a : Union[str, Any] = frozenset(['image'])
__a : Dict = frozenset(
[
'image',
'height',
'width',
'guidance_scale',
]
)
__a : Dict = frozenset(['image'])
__a : Dict = frozenset(
[
'prompt',
'image',
'height',
'width',
'guidance_scale',
'negative_prompt',
'prompt_embeds',
'negative_prompt_embeds',
]
)
__a : Optional[Any] = frozenset(['prompt', 'image', 'negative_prompt'])
__a : List[Any] = frozenset(
[
# Text guided image variation with an image mask
'prompt',
'image',
'mask_image',
'height',
'width',
'guidance_scale',
'negative_prompt',
'prompt_embeds',
'negative_prompt_embeds',
]
)
__a : Union[str, Any] = frozenset(['prompt', 'image', 'mask_image', 'negative_prompt'])
__a : Optional[Any] = frozenset(
[
# image variation with an image mask
'image',
'mask_image',
'height',
'width',
'guidance_scale',
]
)
__a : int = frozenset(['image', 'mask_image'])
__a : Tuple = frozenset(
[
'example_image',
'image',
'mask_image',
'height',
'width',
'guidance_scale',
]
)
__a : Optional[Any] = frozenset(['example_image', 'image', 'mask_image'])
__a : Optional[Any] = frozenset(['class_labels'])
__a : Tuple = frozenset(['class_labels'])
__a : int = frozenset(['batch_size'])
__a : int = frozenset([])
__a : Union[str, Any] = frozenset(['batch_size'])
__a : Tuple = frozenset([])
__a : Dict = frozenset(
[
'prompt',
'audio_length_in_s',
'guidance_scale',
'negative_prompt',
'prompt_embeds',
'negative_prompt_embeds',
'cross_attention_kwargs',
]
)
__a : Dict = frozenset(['prompt', 'negative_prompt'])
__a : Optional[int] = frozenset(['input_tokens'])
__a : str = frozenset(['input_tokens'])
| 200
| 1
|
import argparse
import re
import torch
from CLAP import create_model
from transformers import AutoFeatureExtractor, ClapConfig, ClapModel
__UpperCamelCase: Optional[Any] = {
"""text_branch""": """text_model""",
"""audio_branch""": """audio_model.audio_encoder""",
"""attn""": """attention.self""",
"""self.proj""": """output.dense""",
"""attention.self_mask""": """attn_mask""",
"""mlp.fc1""": """intermediate.dense""",
"""mlp.fc2""": """output.dense""",
"""norm1""": """layernorm_before""",
"""norm2""": """layernorm_after""",
"""bn0""": """batch_norm""",
}
__UpperCamelCase: Union[str, Any] = AutoFeatureExtractor.from_pretrained("""laion/clap-htsat-unfused""", truncation="""rand_trunc""")
def init_clap (checkpoint_path , enable_fusion=False ):
    '''simple docstring'''
    model , model_cfg = create_model(
        'HTSAT-tiny' , 'roberta' , checkpoint_path , precision='fp32' , device='cuda:0' if torch.cuda.is_available() else 'cpu' , enable_fusion=enable_fusion , fusion_type='aff_2d' if enable_fusion else None , )
    return model, model_cfg
def rename_state_dict (state_dict ):
    '''simple docstring'''
    model_state_dict = {}
    sequential_layers_pattern = r'.*sequential.(\d+).*'
    text_projection_pattern = r'.*_projection.(\d+).*'
    for key, value in state_dict.items():
        # check if any key needs to be modified
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify , new_key )
        if re.match(sequential_layers_pattern , key ):
            # replace sequential layers with list
            sequential_layer = re.match(sequential_layers_pattern , key ).group(1 )
            key = key.replace(f"""sequential.{sequential_layer}.""" , f"""layers.{int(sequential_layer )//3}.linear.""" )
        elif re.match(text_projection_pattern , key ):
            projecton_layer = int(re.match(text_projection_pattern , key ).group(1 ) )
            # Because in CLAP they use `nn.Sequential`...
            transformers_projection_layer = 1 if projecton_layer == 0 else 2
            key = key.replace(f"""_projection.{projecton_layer}.""" , f"""_projection.linear{transformers_projection_layer}.""" )
        if "audio" in key and "qkv" in key:
            # split qkv into query key and value
            mixed_qkv = value
            qkv_dim = mixed_qkv.size(0 ) // 3
            query_layer = mixed_qkv[:qkv_dim]
            key_layer = mixed_qkv[qkv_dim : qkv_dim * 2]
            value_layer = mixed_qkv[qkv_dim * 2 :]
            model_state_dict[key.replace('qkv' , 'query' )] = query_layer
            model_state_dict[key.replace('qkv' , 'key' )] = key_layer
            model_state_dict[key.replace('qkv' , 'value' )] = value_layer
        else:
            model_state_dict[key] = value
    return model_state_dict
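# Illustrative rename (hypothetical key): "text_branch.sequential.3.weight"
# first maps "text_branch" -> "text_model", then the sequential pattern turns
# it into "text_model.layers.1.linear.weight" (since 3 // 3 == 1).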
def convert_clap_checkpoint (checkpoint_path , pytorch_dump_folder_path , config_path , enable_fusion=False ):
    '''simple docstring'''
    clap_model , model_cfg = init_clap(checkpoint_path , enable_fusion=enable_fusion )
    clap_model.eval()
    state_dict = clap_model.state_dict()
    state_dict = rename_state_dict(state_dict )
    transformers_config = ClapConfig()
    transformers_config.audio_config.enable_fusion = enable_fusion
    model = ClapModel(transformers_config )
    # ignore the spectrogram embedding layer
    model.load_state_dict(state_dict , strict=False )
    model.save_pretrained(pytorch_dump_folder_path )
    transformers_config.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
__UpperCamelCase: List[str] = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument("""--enable_fusion""", action="""store_true""", help="""Whether to enable fusion or not""")
__UpperCamelCase: List[str] = parser.parse_args()
convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
| 266
|
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import YolosImageProcessor
class YolosImageProcessingTester ( unittest.TestCase ):
'''simple docstring'''
    def __init__( self, parent, batch_size=7, num_channels=3, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], do_rescale=True, rescale_factor=1 / 255, do_pad=True, ):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {'shortest_edge': 18, 'longest_edge': 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
    def prepare_image_processor_dict( self ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
    def get_expected_values( self, image_inputs, batched=False ):
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image ):
                w , h = image.size
            else:
                h , w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size['shortest_edge'] * h / w )
                expected_width = self.size['shortest_edge']
            elif w > h:
                expected_height = self.size['shortest_edge']
                expected_width = int(self.size['shortest_edge'] * w / h )
            else:
                expected_height = self.size['shortest_edge']
                expected_width = self.size['shortest_edge']
        else:
            expected_values = []
            for image in image_inputs:
                expected_height , expected_width = self.get_expected_values([image] )
                expected_values.append((expected_height, expected_width) )
            expected_height = max(expected_values, key=lambda item : item[0] )[0]
            expected_width = max(expected_values, key=lambda item : item[1] )[1]
        return expected_height, expected_width
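    # Example: with size {"shortest_edge": 18}, a 30x40 (w x h) PIL image is
    # expected to resize so the short side becomes 18, giving (height, width)
    # == (24, 18); batched inputs take the max height and width across the batch.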
@require_torch
@require_vision
class YolosImageProcessingTest ( ImageProcessingSavingTestMixin , unittest.TestCase ):
'''simple docstring'''
_A = YolosImageProcessor if is_vision_available() else None
    def setUp( self ):
        self.image_processor_tester = YolosImageProcessingTester(self )
    @property
    def image_processor_dict( self ):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties( self ):
        image_processing = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processing, 'image_mean' ) )
        self.assertTrue(hasattr(image_processing, 'image_std' ) )
        self.assertTrue(hasattr(image_processing, 'do_normalize' ) )
        self.assertTrue(hasattr(image_processing, 'do_resize' ) )
        self.assertTrue(hasattr(image_processing, 'size' ) )
    def test_image_processor_from_dict_with_kwargs( self ):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size, {'shortest_edge': 18, 'longest_edge': 1333} )
        self.assertEqual(image_processor.do_pad, True )
        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False )
        self.assertEqual(image_processor.size, {'shortest_edge': 42, 'longest_edge': 84} )
        self.assertEqual(image_processor.do_pad, False )
    def test_batch_feature( self ):
        pass
    def test_call_pil( self ):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False )
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt' ).pixel_values
        expected_height , expected_width = self.image_processor_tester.get_expected_values(image_inputs )
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )
        # Test batched
        expected_height , expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True )
        encoded_images = image_processing(image_inputs, return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ), )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width))
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ))
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width))
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ))
    def test_equivalence_padding(self):
        # Initialize image_processings
        image_processing_1 = self.image_processing_class(**self.image_processor_dict)
        image_processing_2 = self.image_processing_class(do_resize=False, do_normalize=False, do_rescale=False)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test whether the method "pad" and calling the image processor return the same tensors
        encoded_images_with_method = image_processing_1.pad(image_inputs, return_tensors='pt')
        encoded_images = image_processing_2(image_inputs, return_tensors='pt')
        self.assertTrue(
            torch.allclose(encoded_images_with_method['pixel_values'], encoded_images['pixel_values'], atol=1E-4))
    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
        with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt', 'r') as f:
            target = json.loads(f.read())
        target = {'image_id': 39769, 'annotations': target}
        # encode them
        image_processing = YolosImageProcessor.from_pretrained('hustvl/yolos-small')
        encoding = image_processing(images=image, annotations=target, return_tensors='pt')
        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding['pixel_values'].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3], expected_slice, atol=1E-4))
        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding['labels'][0]['area'], expected_area))
        # verify boxes
        expected_shape = torch.Size([6, 4])
        self.assertEqual(encoding['labels'][0]['boxes'].shape, expected_shape)
        expected_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0], expected_slice, atol=1E-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'], expected_image_id))
        # verify is_crowd
        expected_iscrowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'], expected_iscrowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding['labels'][0]['size'], expected_size))
    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
        with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt', 'r') as f:
            target = json.loads(f.read())
        target = {'file_name': '000000039769.png', 'image_id': 39769, 'segments_info': target}
        masks_path = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic')
        # encode them
        image_processing = YolosImageProcessor(format='coco_panoptic')
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors='pt')
        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding['pixel_values'].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3], expected_slice, atol=1E-4))
        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding['labels'][0]['area'], expected_area))
        # verify boxes
        expected_shape = torch.Size([6, 4])
        self.assertEqual(encoding['labels'][0]['boxes'].shape, expected_shape)
        expected_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0], expected_slice, atol=1E-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'], expected_image_id))
        # verify is_crowd
        expected_iscrowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'], expected_iscrowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding['labels'][0]['masks'].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding['labels'][0]['size'], expected_size))
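# --- Added note: a dependency-free sketch of the resize rule encoded by
# get_expected_values above. Only shortest_edge=18 comes from the test
# configuration; the helper name and sample sizes are illustrative.
def expected_resize(w, h, shortest_edge=18):
    # The shorter side is scaled to `shortest_edge`; the longer side keeps the aspect ratio.
    if w < h:
        return int(shortest_edge * h / w), shortest_edge  # (height, width)
    if w > h:
        return shortest_edge, int(shortest_edge * w / h)
    return shortest_edge, shortest_edge


assert expected_resize(30, 60) == (36, 18)
assert expected_resize(60, 30) == (18, 36)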
| 266
| 1
|
"""simple docstring"""
class TrieNode:
    def __init__(self):
        self.nodes = {}  # Mapping from char to TrieNode
        self.is_leaf = False

    def insert_many(self, words: list[str]):
        for word in words:
            self.insert(word)

    def insert(self, word: str):
        curr = self
        for char in word:
            if char not in curr.nodes:
                curr.nodes[char] = TrieNode()
            curr = curr.nodes[char]
        curr.is_leaf = True

    def find(self, word: str) -> bool:
        curr = self
        for char in word:
            if char not in curr.nodes:
                return False
            curr = curr.nodes[char]
        return curr.is_leaf

    def delete(self, word: str):
        def _delete(curr: "TrieNode", word: str, index: int) -> bool:
            if index == len(word):
                # If word does not exist
                if not curr.is_leaf:
                    return False
                curr.is_leaf = False
                return len(curr.nodes) == 0
            char = word[index]
            char_node = curr.nodes.get(char)
            # If char not in current trie node
            if not char_node:
                return False
            # Flag to check if node can be deleted
            delete_curr = _delete(char_node, word, index + 1)
            if delete_curr:
                del curr.nodes[char]
                return len(curr.nodes) == 0
            return delete_curr

        _delete(self, word, 0)


def print_words(node: TrieNode, word: str):
    if node.is_leaf:
        print(word, end=' ')
    for key, value in node.nodes.items():
        print_words(value, word + key)


def test_trie():
    words = 'banana bananas bandana band apple all beast'.split()
    root = TrieNode()
    root.insert_many(words)
    # print_words(root, "")
    assert all(root.find(word) for word in words)
    assert root.find('banana')
    assert not root.find('bandanas')
    assert not root.find('apps')
    assert root.find('apple')
    assert root.find('all')
    root.delete('all')
    assert not root.find('all')
    root.delete('banana')
    assert not root.find('banana')
    assert root.find('bananas')
    return True


def print_results(msg: str, passes: bool):
    print(str(msg), 'works!' if passes else "doesn't work :(")


def test():
    assert test_trie()


def main():
    print_results('Testing trie functionality', test_trie())


if __name__ == "__main__":
    main()
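# --- Added note: a quick, self-contained usage sketch of the trie above
# (the helper name is illustrative).
def demo_trie():
    trie = TrieNode()
    trie.insert_many(["band", "bandana"])
    assert trie.find("band")
    assert not trie.find("ban")  # "ban" is only a prefix, not an inserted word
    trie.delete("band")
    assert trie.find("bandana")  # deleting "band" leaves longer words intact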
| 708
|
"""simple docstring"""
from __future__ import annotations
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import is_tf_available, is_vision_available
from ...test_modeling_tf_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_tf_bert import TFBertModelTester
from ..clip.test_modeling_tf_clip import TFCLIPVisionModelTester
from ..deit.test_modeling_tf_deit import TFDeiTModelTester
from ..roberta.test_modeling_tf_roberta import TFRobertaModelTester
from ..vit.test_modeling_tf_vit import TFViTModelTester
if is_tf_available():
from transformers import (
TFBertModel,
TFCLIPVisionModel,
TFDeiTModel,
TFRobertaModel,
TFVisionTextDualEncoderModel,
TFViTModel,
VisionTextDualEncoderConfig,
)
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor
def to_atuple(x):
    # Return x unchanged if it is already iterable, else duplicate it into a pair.
    if isinstance(x, collections.abc.Iterable):
        return x
    return (x, x)
@require_tf
class SCREAMING_SNAKE_CASE_ :
"""simple docstring"""
def __lowercase ( self :Optional[int] , __lowercase :Union[str, Any] , __lowercase :List[Any] ):
pass
def __lowercase ( self :Any ):
pass
def __lowercase ( self :str ):
pass
def __lowercase ( self :str , __lowercase :Dict , __lowercase :List[Any] , __lowercase :Union[str, Any] , __lowercase :Union[str, Any] , __lowercase :Any=None , **__lowercase :Tuple ):
__lowerCamelCase : Optional[Any] =VisionTextDualEncoderConfig.from_vision_text_configs(__lowercase , __lowercase )
__lowerCamelCase : Dict =TFVisionTextDualEncoderModel(__lowercase )
__lowerCamelCase : List[Any] =model(input_ids=__lowercase , pixel_values=__lowercase , attention_mask=__lowercase )
self.assertEqual(output['''text_embeds'''].shape , (input_ids.shape[0], config.projection_dim) )
self.assertEqual(output['''image_embeds'''].shape , (pixel_values.shape[0], config.projection_dim) )
def __lowercase ( self :str , __lowercase :Any , __lowercase :Dict , __lowercase :Any , __lowercase :int , __lowercase :str=None , **__lowercase :List[str] ):
__lowerCamelCase , __lowerCamelCase : Any =self.get_vision_text_model(__lowercase , __lowercase )
__lowerCamelCase : List[str] =TFVisionTextDualEncoderModel(vision_model=__lowercase , text_model=__lowercase )
__lowerCamelCase : Optional[int] =model(input_ids=__lowercase , pixel_values=__lowercase , attention_mask=__lowercase )
self.assertEqual(output['''text_embeds'''].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output['''image_embeds'''].shape , (pixel_values.shape[0], model.config.projection_dim) )
def __lowercase ( self :int , __lowercase :Dict , __lowercase :List[Any] , __lowercase :Any , __lowercase :Optional[Any] , __lowercase :Tuple=None , **__lowercase :List[Any] ):
__lowerCamelCase , __lowerCamelCase : int =self.get_vision_text_model(__lowercase , __lowercase )
__lowerCamelCase : Dict ={'''vision_model''': vision_model, '''text_model''': text_model}
__lowerCamelCase : List[Any] =TFVisionTextDualEncoderModel.from_vision_text_pretrained(**__lowercase )
__lowerCamelCase : Optional[Any] =model(input_ids=__lowercase , pixel_values=__lowercase , attention_mask=__lowercase )
self.assertEqual(output['''text_embeds'''].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output['''image_embeds'''].shape , (pixel_values.shape[0], model.config.projection_dim) )
def __lowercase ( self :Optional[Any] , __lowercase :Dict , __lowercase :int , __lowercase :Dict , __lowercase :List[Any] , __lowercase :Tuple=None , **__lowercase :Optional[Any] ):
__lowerCamelCase , __lowerCamelCase : Tuple =self.get_vision_text_model(__lowercase , __lowercase )
__lowerCamelCase : List[Any] =TFVisionTextDualEncoderModel(vision_model=__lowercase , text_model=__lowercase )
__lowerCamelCase : Any =model(input_ids=__lowercase , pixel_values=__lowercase , attention_mask=__lowercase )
__lowerCamelCase : Optional[Any] =output[0].numpy()
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__lowercase )
__lowerCamelCase : Dict =TFVisionTextDualEncoderModel.from_pretrained(__lowercase )
__lowerCamelCase : Optional[Any] =model(input_ids=__lowercase , pixel_values=__lowercase , attention_mask=__lowercase )
__lowerCamelCase : Any =after_output[0].numpy()
__lowerCamelCase : Tuple =np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(__lowercase , 1e-5 )
def __lowercase ( self :int , __lowercase :Any , __lowercase :List[Any] , __lowercase :Any , __lowercase :str , __lowercase :int=None , **__lowercase :List[Any] ):
__lowerCamelCase , __lowerCamelCase : Any =self.get_vision_text_model(__lowercase , __lowercase )
__lowerCamelCase : Union[str, Any] =TFVisionTextDualEncoderModel(vision_model=__lowercase , text_model=__lowercase )
__lowerCamelCase : Any =model(
input_ids=__lowercase , pixel_values=__lowercase , attention_mask=__lowercase , output_attentions=__lowercase )
__lowerCamelCase : Optional[Any] =output.vision_model_output.attentions
self.assertEqual(len(__lowercase ) , vision_config.num_hidden_layers )
# in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
__lowerCamelCase : Dict =to_atuple(vision_model.config.image_size )
__lowerCamelCase : Tuple =to_atuple(vision_model.config.patch_size )
__lowerCamelCase : Tuple =(image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
__lowerCamelCase : Any =num_patches + 1
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
__lowerCamelCase : List[Any] =output.text_model_output.attentions
self.assertEqual(len(__lowercase ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
def __lowercase ( self :List[Any] , __lowercase :np.ndarray , __lowercase :np.ndarray , __lowercase :float ):
__lowerCamelCase : Dict =np.abs((a - b) ).max()
self.assertLessEqual(__lowercase , __lowercase , f'Difference between torch and flax is {diff} (>= {tol}).' )
def __lowercase ( self :List[str] ):
__lowerCamelCase : Any =self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_model(**__lowercase )
def __lowercase ( self :Tuple ):
__lowerCamelCase : List[Any] =self.prepare_config_and_inputs()
self.check_model_from_pretrained_configs(**__lowercase )
def __lowercase ( self :Union[str, Any] ):
__lowerCamelCase : Dict =self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_from_pretrained(**__lowercase )
def __lowercase ( self :Any ):
__lowerCamelCase : List[Any] =self.prepare_config_and_inputs()
self.check_save_load(**__lowercase )
def __lowercase ( self :List[str] ):
__lowerCamelCase : Any =self.prepare_config_and_inputs()
self.check_vision_text_output_attention(**__lowercase )
@slow
def __lowercase ( self :Tuple ):
__lowerCamelCase , __lowerCamelCase : Optional[Any] =self.get_pretrained_model_and_inputs()
__lowerCamelCase : Optional[Any] =model_a(**__lowercase )
__lowerCamelCase : int =outputs[0].numpy()
with tempfile.TemporaryDirectory() as tmp_dirname:
model_a.save_pretrained(__lowercase )
__lowerCamelCase : List[Any] =TFVisionTextDualEncoderModel.from_pretrained(__lowercase )
__lowerCamelCase : Tuple =model_a(**__lowercase )
__lowerCamelCase : Optional[int] =after_outputs[0].numpy()
__lowerCamelCase : Optional[Any] =np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(__lowercase , 1e-5 )
@require_tf
class SCREAMING_SNAKE_CASE_ ( snake_case__ , unittest.TestCase ):
"""simple docstring"""
def __lowercase ( self :List[Any] ):
__lowerCamelCase : Union[str, Any] =TFVisionTextDualEncoderModel.from_vision_text_pretrained(
'''hf-internal-testing/tiny-random-vit''' , '''hf-internal-testing/tiny-random-bert''' )
__lowerCamelCase : Any =13
__lowerCamelCase : Optional[int] =floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
] )
__lowerCamelCase : Dict =ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
__lowerCamelCase : Optional[Any] =random_attention_mask([batch_size, 4] )
__lowerCamelCase : Dict ={'''pixel_values''': pixel_values, '''input_ids''': input_ids, '''attention_mask''': attention_mask}
return model, inputs
def __lowercase ( self :Optional[Any] , __lowercase :List[Any] , __lowercase :List[str] ):
__lowerCamelCase : Optional[Any] =TFViTModel(__lowercase , name='''vision_model''' )
__lowerCamelCase : Union[str, Any] =TFBertModel(__lowercase , name='''text_model''' )
return vision_model, text_model
    def prepare_config_and_inputs(self):
        vit_model_tester = TFViTModelTester(self)
        bert_model_tester = TFBertModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
        vision_config, pixel_values, _ = vision_config_and_inputs
        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_tf
class SCREAMING_SNAKE_CASE_ ( snake_case__ , unittest.TestCase ):
"""simple docstring"""
def __lowercase ( self :Optional[Any] ):
# DeiT repo doesn't have TF weights, but we don't actually use the weights at all so let's
# just reinitialize it.
__lowerCamelCase : List[Any] =TFVisionTextDualEncoderModel.from_vision_text_pretrained(
'''Rocketknight1/tiny-random-deit-tf''' , '''hf-internal-testing/tiny-random-roberta''' )
__lowerCamelCase : Tuple =13
__lowerCamelCase : str =floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
] )
__lowerCamelCase : Any =ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
__lowerCamelCase : Optional[int] =random_attention_mask([batch_size, 4] )
__lowerCamelCase : Optional[Any] ={'''pixel_values''': pixel_values, '''input_ids''': input_ids, '''attention_mask''': attention_mask}
return model, inputs
def __lowercase ( self :Any , __lowercase :Tuple , __lowercase :List[Any] , __lowercase :Union[str, Any] , __lowercase :Any , __lowercase :int=None , **__lowercase :List[str] ):
__lowerCamelCase , __lowerCamelCase : Union[str, Any] =self.get_vision_text_model(__lowercase , __lowercase )
__lowerCamelCase : Optional[int] =TFVisionTextDualEncoderModel(vision_model=__lowercase , text_model=__lowercase )
__lowerCamelCase : Any =model(
input_ids=__lowercase , pixel_values=__lowercase , attention_mask=__lowercase , output_attentions=__lowercase )
__lowerCamelCase : str =output.vision_model_output.attentions
self.assertEqual(len(__lowercase ) , vision_config.num_hidden_layers )
# in DEiT, the seq_len equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
__lowerCamelCase : int =to_atuple(vision_model.config.image_size )
__lowerCamelCase : Union[str, Any] =to_atuple(vision_model.config.patch_size )
__lowerCamelCase : Tuple =(image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
__lowerCamelCase : Any =num_patches + 2
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
__lowerCamelCase : Any =output.text_model_output.attentions
self.assertEqual(len(__lowercase ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
def __lowercase ( self :Any , __lowercase :Any , __lowercase :Optional[Any] ):
__lowerCamelCase : str =TFDeiTModel(__lowercase , name='''vision_model''' )
__lowerCamelCase : List[str] =TFRobertaModel(__lowercase , name='''text_model''' )
return vision_model, text_model
    def prepare_config_and_inputs(self):
        vit_model_tester = TFDeiTModelTester(self)
        bert_model_tester = TFRobertaModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
        vision_config, pixel_values, _ = vision_config_and_inputs
        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_tf
class SCREAMING_SNAKE_CASE_ ( snake_case__ , unittest.TestCase ):
"""simple docstring"""
def __lowercase ( self :Tuple ):
__lowerCamelCase : str =TFVisionTextDualEncoderModel.from_vision_text_pretrained(
'''Rocketknight1/tiny-random-clip-tf''' , '''hf-internal-testing/tiny-random-bert''' )
__lowerCamelCase : str =13
__lowerCamelCase : int =floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
] )
__lowerCamelCase : int =ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
__lowerCamelCase : str =random_attention_mask([batch_size, 4] )
__lowerCamelCase : Optional[Any] ={'''pixel_values''': pixel_values, '''input_ids''': input_ids, '''attention_mask''': attention_mask}
return model, inputs
def __lowercase ( self :List[Any] , __lowercase :str , __lowercase :Tuple ):
__lowerCamelCase : int =TFCLIPVisionModel(__lowercase , name='''vision_model''' )
__lowerCamelCase : Optional[Any] =TFBertModel(__lowercase , name='''text_model''' )
return vision_model, text_model
    def prepare_config_and_inputs(self):
        clip_model_tester = TFCLIPVisionModelTester(self)
        bert_model_tester = TFBertModelTester(self)
        vision_config_and_inputs = clip_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
        vision_config, pixel_values = vision_config_and_inputs
        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_vision
@require_tf
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
"""simple docstring"""
@slow
def __lowercase ( self :List[Any] ):
__lowerCamelCase : Union[str, Any] =TFVisionTextDualEncoderModel.from_pretrained(
'''clip-italian/clip-italian''' , logit_scale_init_value=1.0 , from_pt=__lowercase )
__lowerCamelCase : str =VisionTextDualEncoderProcessor.from_pretrained('''clip-italian/clip-italian''' )
__lowerCamelCase : Optional[Any] =Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
__lowerCamelCase : int =processor(
text=['''una foto di un gatto''', '''una foto di un cane'''] , images=__lowercase , padding=__lowercase , return_tensors='''np''' )
__lowerCamelCase : str =model(**__lowercase )
# verify the logits
self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) )
self.assertEqual(
outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , )
__lowerCamelCase : int =np.array([[1.2284727, 0.3104122]] )
self.assertTrue(np.allclose(outputs.logits_per_image.numpy() , __lowercase , atol=1e-3 ) )
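# --- Added note: a usage sketch distilled from the integration test above. The
# checkpoint and processor names come straight from the test; `image` is assumed
# to be any PIL image.
def demo_dual_encoder(image):
    model = TFVisionTextDualEncoderModel.from_pretrained(
        "clip-italian/clip-italian", logit_scale_init_value=1.0, from_pt=True)
    processor = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian")
    inputs = processor(text=["una foto di un gatto"], images=image, padding=True, return_tensors="np")
    return model(**inputs).logits_per_image  # shape: (num_images, num_texts)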
| 363
| 0
|
'''simple docstring'''
from argparse import ArgumentParser, Namespace
from typing import Any, List, Optional
from ..pipelines import Pipeline, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
try:
from fastapi import Body, FastAPI, HTTPException
from fastapi.routing import APIRoute
from pydantic import BaseModel
from starlette.responses import JSONResponse
from uvicorn import run
    _serve_dependencies_installed = True
except (ImportError, AttributeError):
    BaseModel = object

    def Body(*x, **y):
        pass

    _serve_dependencies_installed = False

logger = logging.get_logger('transformers-cli/serving')
def serve_command_factory(args: Namespace):
    """Factory used to instantiate the serving server from command line arguments."""
    nlp = pipeline(
        task=args.task, model=args.model if args.model else None, config=args.config, tokenizer=args.tokenizer, device=args.device,
    )
    return ServeCommand(nlp, args.host, args.port, args.workers)
class ServeModelInfoResult(BaseModel):
    """Expose model information."""
    infos: dict


class ServeTokenizeResult(BaseModel):
    """Tokenize result model."""
    tokens: List[str]
    tokens_ids: Optional[List[int]]


class ServeDeTokenizeResult(BaseModel):
    """De-tokenize result model."""
    text: str


class ServeForwardResult(BaseModel):
    """Forward result model."""
    output: Any


class ServeCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        serve_parser = parser.add_parser(
            'serve', help='CLI tool to run inference requests through REST and GraphQL endpoints.')
        serve_parser.add_argument(
            '--task', type=str, choices=get_supported_tasks(), help='The task to run the pipeline on')
        serve_parser.add_argument('--host', type=str, default='localhost', help='Interface the server will listen on.')
        serve_parser.add_argument('--port', type=int, default=8888, help='Port the serving will listen to.')
        serve_parser.add_argument('--workers', type=int, default=1, help='Number of http workers')
        serve_parser.add_argument('--model', type=str, help="Model's name or path to stored model.")
        serve_parser.add_argument('--config', type=str, help="Model's config name or path to stored model.")
        serve_parser.add_argument('--tokenizer', type=str, help='Tokenizer name to use.')
        serve_parser.add_argument(
            '--device', type=int, default=-1, help='Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)')
        serve_parser.set_defaults(func=serve_command_factory)
    def __init__(self, pipeline: Pipeline, host: str, port: int, workers: int):
        self._pipeline = pipeline
        self.host = host
        self.port = port
        self.workers = workers
        if not _serve_dependencies_installed:
            raise RuntimeError(
                'Using serve command requires FastAPI and uvicorn. '
                'Please install transformers with [serving]: pip install "transformers[serving]".'
                'Or install FastAPI and uvicorn separately.')
        else:
            logger.info(f"Serving model over {host}:{port}")
            self._app = FastAPI(
                routes=[
                    APIRoute(
                        '/', self.model_info, response_model=ServeModelInfoResult, response_class=JSONResponse, methods=['GET']),
                    APIRoute(
                        '/tokenize', self.tokenize, response_model=ServeTokenizeResult, response_class=JSONResponse, methods=['POST']),
                    APIRoute(
                        '/detokenize', self.detokenize, response_model=ServeDeTokenizeResult, response_class=JSONResponse, methods=['POST']),
                    APIRoute(
                        '/forward', self.forward, response_model=ServeForwardResult, response_class=JSONResponse, methods=['POST']),
                ], timeout=600)
    def run(self):
        run(self._app, host=self.host, port=self.port, workers=self.workers)

    def model_info(self):
        return ServeModelInfoResult(infos=vars(self._pipeline.model.config))

    def tokenize(self, text_input: str = Body(None, embed=True), return_ids: bool = Body(False, embed=True)):
        try:
            tokens_txt = self._pipeline.tokenizer.tokenize(text_input)
            if return_ids:
                tokens_ids = self._pipeline.tokenizer.convert_tokens_to_ids(tokens_txt)
                return ServeTokenizeResult(tokens=tokens_txt, tokens_ids=tokens_ids)
            else:
                return ServeTokenizeResult(tokens=tokens_txt)
        except Exception as e:
            raise HTTPException(status_code=500, detail={'model': '', 'error': str(e)})

    def detokenize(
        self,
        tokens_ids: List[int] = Body(None, embed=True),
        skip_special_tokens: bool = Body(False, embed=True),
        cleanup_tokenization_spaces: bool = Body(True, embed=True),
    ):
        try:
            decoded_str = self._pipeline.tokenizer.decode(tokens_ids, skip_special_tokens, cleanup_tokenization_spaces)
            return ServeDeTokenizeResult(model='', text=decoded_str)
        except Exception as e:
            raise HTTPException(status_code=500, detail={'model': '', 'error': str(e)})

    async def forward(self, inputs=Body(None, embed=True)):
        # Check we don't have empty string
        if len(inputs) == 0:
            return ServeForwardResult(output=[], attention=[])
        try:
            # Forward through the model
            output = self._pipeline(inputs)
            return ServeForwardResult(output=output)
        except Exception as e:
            raise HTTPException(500, {'error': str(e)})
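# --- Added note: a hedged client-side sketch. It assumes the server was started
# with `transformers-cli serve --task <task> --port 8888` and that the request
# body field names match the Body(..., embed=True) parameters reconstructed
# above (text_input, return_ids); `requests` is a third-party HTTP client.
def demo_tokenize_request():
    import requests

    resp = requests.post(
        "http://localhost:8888/tokenize",
        json={"text_input": "Hello world", "return_ids": True},
    )
    return resp.json()  # e.g. {"tokens": [...], "tokens_ids": [...]}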
| 90
|
"""simple docstring"""
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
from .feature_extraction_wav2vec2 import Wav2Vec2FeatureExtractor
from .tokenization_wav2vec2 import Wav2Vec2CTCTokenizer
class Wav2Vec2Processor(ProcessorMixin):
    feature_extractor_class = "Wav2Vec2FeatureExtractor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        try:
            return super().from_pretrained(pretrained_model_name_or_path, **kwargs)
        except OSError:
            warnings.warn(
                f"Loading a tokenizer inside {cls.__name__} from a config that does not"
                " include a `tokenizer_class` attribute is deprecated and will be "
                "removed in v5. Please add `'tokenizer_class': 'Wav2Vec2CTCTokenizer'`"
                " attribute to either your `config.json` or `tokenizer_config.json` "
                "file to suppress this warning: ", FutureWarning)
            feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(pretrained_model_name_or_path, **kwargs)
            tokenizer = Wav2Vec2CTCTokenizer.from_pretrained(pretrained_model_name_or_path, **kwargs)
            return cls(feature_extractor=feature_extractor, tokenizer=tokenizer)

    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)
        if "raw_speech" in kwargs:
            warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.")
            audio = kwargs.pop("raw_speech")
        else:
            audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]
        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")
        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)
        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def pad(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor.pad(*args, **kwargs)
        input_features = kwargs.pop("input_features", None)
        labels = kwargs.pop("labels", None)
        if len(args) > 0:
            input_features = args[0]
            args = args[1:]
        if input_features is not None:
            input_features = self.feature_extractor.pad(input_features, *args, **kwargs)
        if labels is not None:
            labels = self.tokenizer.pad(labels, **kwargs)
        if labels is None:
            return input_features
        elif input_features is None:
            return labels
        else:
            input_features["labels"] = labels["input_ids"]
            return input_features

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your audio inputs, or in a separate call.")
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
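# --- Added note: a minimal usage sketch for the processor above. The checkpoint
# name is illustrative, and `speech` is assumed to be a 1-D float array sampled
# at 16 kHz.
def demo_processor(speech):
    processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-base-960h")
    inputs = processor(audio=speech, sampling_rate=16000, return_tensors="pt")
    labels = processor(text="HELLO WORLD", return_tensors="pt").input_ids
    return inputs, labels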
| 490
| 0
|
'''simple docstring'''
def solution(n: int = 1000) -> int:
    """Return the largest product a*b*c of a Pythagorean triplet with a + b + c == n."""
    product = -1
    candidate = 0
    for a in range(1, n // 3):
        # Solving the two equations a**2+b**2=c**2 and a+b+c=N eliminating c
        b = (n * n - 2 * a * n) // (2 * n - 2 * a)
        c = n - a - b
        if c * c == (a * a + b * b):
            candidate = a * b * c
        if candidate >= product:
            product = candidate
    return product
if __name__ == "__main__":
print(f"{solution() = }")
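# --- Added note: the algebra behind the loop above. From a + b + c = n and
# a**2 + b**2 = c**2, substitute c = n - a - b and solve for b:
#     b = (n**2 - 2*a*n) / (2*n - 2*a)
# A quick sanity check against the cleaned-up solution() above:
assert solution(12) == 60  # the 3-4-5 triangle: 3 + 4 + 5 = 12 and 3 * 4 * 5 = 60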
| 238
|
'''simple docstring'''
from typing import Any
def mode(input_list: list) -> list[Any]:
    """Return the mode(s) of input_list, sorted in ascending order."""
    if not input_list:
        return []
    counts = [input_list.count(value) for value in input_list]
    max_count = max(counts)  # Gets the maximum count in the input list.
    # Gets values of modes
    return sorted({input_list[i] for i, value in enumerate(counts) if value == max_count})
if __name__ == "__main__":
import doctest
doctest.testmod()
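# --- Added note: quick examples of the behaviour of mode() above (illustrative):
assert mode([2, 2, 3]) == [2]
assert mode([1, 2, 2, 3, 3]) == [2, 3]  # ties return every mode, sorted
assert mode([]) == []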
| 238
| 1
|
def count_divisors(n: int) -> int:
    """Count the divisors of n via its prime factorisation."""
    n_divisors = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        n_divisors *= multiplicity + 1
        i += 1
    if n > 1:
        n_divisors *= 2
    return n_divisors


def solution() -> int:
    """Return the first triangle number with more than 500 divisors."""
    i = 1
    t_num = 1
    while True:
        i += 1
        t_num += i
        if count_divisors(t_num) > 500:
            break
    return t_num
if __name__ == "__main__":
print(solution())
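# --- Added note: a quick sanity check of count_divisors() above. 28 = 2**2 * 7,
# so it has (2 + 1) * (1 + 1) = 6 divisors. solution() itself returns the first
# triangle number with more than 500 divisors (Project Euler problem 12).
assert count_divisors(28) == 6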
| 105
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from accelerate import PartialState
from accelerate.utils.operations import broadcast, gather, gather_object, pad_across_processes, reduce
def create_tensor(state):
    return (torch.arange(state.num_processes) + 1.0 + (state.num_processes * state.process_index)).to(state.device)


def test_gather(state):
    tensor = create_tensor(state)
    gathered_tensor = gather(tensor)
    assert gathered_tensor.tolist() == list(range(1, state.num_processes**2 + 1))


def test_gather_object(state):
    obj = [state.process_index]
    gathered_obj = gather_object(obj)
    assert len(gathered_obj) == state.num_processes, f"{gathered_obj}, {len(gathered_obj)} != {state.num_processes}"
    assert gathered_obj == list(range(state.num_processes)), f"{gathered_obj} != {list(range(state.num_processes))}"


def test_broadcast(state):
    tensor = create_tensor(state)
    broadcasted_tensor = broadcast(tensor)
    assert broadcasted_tensor.shape == torch.Size([state.num_processes])
    assert broadcasted_tensor.tolist() == list(range(1, state.num_processes + 1))


def test_pad_across_processes(state):
    # We need to pad the tensor with one more element if we are the main process
    # to ensure that we can pad
    if state.is_main_process:
        tensor = torch.arange(state.num_processes + 1).to(state.device)
    else:
        tensor = torch.arange(state.num_processes).to(state.device)
    padded_tensor = pad_across_processes(tensor)
    assert padded_tensor.shape == torch.Size([state.num_processes + 1])
    if not state.is_main_process:
        assert padded_tensor.tolist() == list(range(0, state.num_processes)) + [0]


def test_reduce_sum(state):
    # For now runs on only two processes
    if state.num_processes != 2:
        return
    tensor = create_tensor(state)
    reduced_tensor = reduce(tensor, 'sum')
    truth_tensor = torch.tensor([4.0, 6]).to(state.device)
    assert torch.allclose(reduced_tensor, truth_tensor), f"{reduced_tensor} != {truth_tensor}"


def test_reduce_mean(state):
    # For now runs on only two processes
    if state.num_processes != 2:
        return
    tensor = create_tensor(state)
    reduced_tensor = reduce(tensor, 'mean')
    truth_tensor = torch.tensor([2.0, 3]).to(state.device)
    assert torch.allclose(reduced_tensor, truth_tensor), f"{reduced_tensor} != {truth_tensor}"


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


def main():
    state = PartialState()
    state.print(f"State: {state}")
    state.print('testing gather')
    test_gather(state)
    state.print('testing gather_object')
    test_gather_object(state)
    state.print('testing broadcast')
    test_broadcast(state)
    state.print('testing pad_across_processes')
    test_pad_across_processes(state)
    state.print('testing reduce_sum')
    test_reduce_sum(state)
    state.print('testing reduce_mean')
    test_reduce_mean(state)
if __name__ == "__main__":
main()
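# --- Added note: these collective-op checks only do real work when several
# processes are launched; a typical invocation (script name illustrative) is:
#     accelerate launch --num_processes 2 test_operations.py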
| 170
| 0
|
'''simple docstring'''
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
PATTERNS = [
['attention', 'attn'],
['encoder_attention', 'encoder_attn'],
['q_lin', 'q_proj'],
['k_lin', 'k_proj'],
['v_lin', 'v_proj'],
['out_lin', 'out_proj'],
['norm_embeddings', 'layernorm_embedding'],
['position_embeddings', 'embed_positions'],
['embeddings', 'embed_tokens'],
['ffn.lin', 'fc'],
]
def rename_state_dict_key(k):
    """Map a ParlAI state-dict key onto the matching Hugging Face key."""
    if k == "embeddings.weight":
        return "shared.weight"
    for parlai_name, hf_name in PATTERNS:
        k = k.replace(parlai_name, hf_name)
    if k.startswith("encoder"):
        k = k.replace(".attn", ".self_attn")
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "final_layer_norm")
    elif k.startswith("decoder"):
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "encoder_attn_layer_norm")
        k = k.replace("norm3", "final_layer_norm")
    return k
def rename_layernorm_keys(sd):
    keys = [
        "model.encoder.layernorm_embedding.weight",
        "model.encoder.layernorm_embedding.bias",
        "model.decoder.layernorm_embedding.weight",
        "model.decoder.layernorm_embedding.bias",
    ]
    for k in keys:
        v = sd.pop(k)
        new_k = k.replace("layernorm_embedding", "layer_norm")
        assert new_k not in sd
        sd[new_k] = v


IGNORE_KEYS = ['START']
@torch.no_grad()
def convert_parlai_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_json_path):
    """Copy/rename a ParlAI Blenderbot checkpoint into the Hugging Face format."""
    model = torch.load(checkpoint_path, map_location="cpu")
    sd = model["model"]
    cfg = BlenderbotConfig.from_json_file(config_json_path)
    m = BlenderbotForConditionalGeneration(cfg)
    valid_keys = m.model.state_dict().keys()
    failures = []
    mapping = {}
    for k, v in sd.items():
        if k in IGNORE_KEYS:
            continue
        new_k = rename_state_dict_key(k)
        if new_k not in valid_keys:
            failures.append([k, new_k])
        else:
            mapping[new_k] = v
    if cfg.normalize_before:  # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
        rename_layernorm_keys(sd)
    m.model.load_state_dict(mapping, strict=True)
    m.half()
    m.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--src_path', type=str, help='like blenderbot-model.bin')
parser.add_argument('--save_dir', default='hf_blenderbot', type=str, help='Where to save converted model.')
parser.add_argument(
'--hf_config_json', default='blenderbot-3b-config.json', type=str, help='Path to config to use'
)
args = parser.parse_args()
convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
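# --- Added note: an example invocation assembled from the argparse definitions
# above (the script file name and paths are illustrative):
#     python convert_blenderbot_checkpoint.py \
#         --src_path blenderbot-model.bin \
#         --save_dir hf_blenderbot \
#         --hf_config_json blenderbot-3b-config.json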
| 719
|
'''simple docstring'''
import json
import os
import unittest
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class BioGptTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BioGptTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "w</w>",
            "r</w>",
            "t</w>",
            "lo",
            "low",
            "er</w>",
            "low</w>",
            "lowest</w>",
            "newer</w>",
            "wider</w>",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = BioGptTokenizer(self.vocab_file, self.merges_file)
        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    @slow
    def test_sequence_builders(self):
        tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")
        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)
        self.assertTrue(encoded_sentence == [2] + text)
        self.assertTrue(encoded_pair == [2] + text + [2] + text_2)
| 435
| 0
|
'''simple docstring'''
import re
def dna_complement(strand: str) -> str:
    """Return the complementary DNA strand (A<->T, C<->G)."""
    if len(re.findall("[ATCG]", strand)) != len(strand):
        raise ValueError("Invalid Strand")
    return strand.translate(strand.maketrans("ATCG", "TAGC"))
if __name__ == "__main__":
import doctest
doctest.testmod()
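# --- Added note: quick examples of dna_complement() above (A<->T and C<->G swap):
assert dna_complement("ATCGA") == "TAGCT"
assert dna_complement("GGTA") == "CCAT"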
| 48
|
"""simple docstring"""
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision.transforms import functional as F
from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f'transformer.encoder.layers.{i}.self_attn.out_proj.weight', f'encoder.layers.{i}.self_attn.out_proj.weight')
)
rename_keys.append(
(f'transformer.encoder.layers.{i}.self_attn.out_proj.bias', f'encoder.layers.{i}.self_attn.out_proj.bias')
)
rename_keys.append((f'transformer.encoder.layers.{i}.linear1.weight', f'encoder.layers.{i}.fc1.weight'))
rename_keys.append((f'transformer.encoder.layers.{i}.linear1.bias', f'encoder.layers.{i}.fc1.bias'))
rename_keys.append((f'transformer.encoder.layers.{i}.linear2.weight', f'encoder.layers.{i}.fc2.weight'))
rename_keys.append((f'transformer.encoder.layers.{i}.linear2.bias', f'encoder.layers.{i}.fc2.bias'))
rename_keys.append(
(f'transformer.encoder.layers.{i}.norm1.weight', f'encoder.layers.{i}.self_attn_layer_norm.weight')
)
rename_keys.append((f'transformer.encoder.layers.{i}.norm1.bias', f'encoder.layers.{i}.self_attn_layer_norm.bias'))
rename_keys.append((f'transformer.encoder.layers.{i}.norm2.weight', f'encoder.layers.{i}.final_layer_norm.weight'))
rename_keys.append((f'transformer.encoder.layers.{i}.norm2.bias', f'encoder.layers.{i}.final_layer_norm.bias'))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(f'transformer.decoder.layers.{i}.self_attn.out_proj.weight', f'decoder.layers.{i}.self_attn.out_proj.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.self_attn.out_proj.bias', f'decoder.layers.{i}.self_attn.out_proj.bias')
)
rename_keys.append(
(
f'transformer.decoder.layers.{i}.multihead_attn.out_proj.weight',
f'decoder.layers.{i}.encoder_attn.out_proj.weight',
)
)
rename_keys.append(
(
f'transformer.decoder.layers.{i}.multihead_attn.out_proj.bias',
f'decoder.layers.{i}.encoder_attn.out_proj.bias',
)
)
rename_keys.append((f'transformer.decoder.layers.{i}.linear1.weight', f'decoder.layers.{i}.fc1.weight'))
rename_keys.append((f'transformer.decoder.layers.{i}.linear1.bias', f'decoder.layers.{i}.fc1.bias'))
rename_keys.append((f'transformer.decoder.layers.{i}.linear2.weight', f'decoder.layers.{i}.fc2.weight'))
rename_keys.append((f'transformer.decoder.layers.{i}.linear2.bias', f'decoder.layers.{i}.fc2.bias'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm1.weight', f'decoder.layers.{i}.self_attn_layer_norm.weight')
)
rename_keys.append((f'transformer.decoder.layers.{i}.norm1.bias', f'decoder.layers.{i}.self_attn_layer_norm.bias'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm2.weight', f'decoder.layers.{i}.encoder_attn_layer_norm.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm2.bias', f'decoder.layers.{i}.encoder_attn_layer_norm.bias')
)
rename_keys.append((f'transformer.decoder.layers.{i}.norm3.weight', f'decoder.layers.{i}.final_layer_norm.weight'))
rename_keys.append((f'transformer.decoder.layers.{i}.norm3.bias', f'decoder.layers.{i}.final_layer_norm.bias'))
# convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
("input_proj.weight", "input_projection.weight"),
("input_proj.bias", "input_projection.bias"),
("query_embed.weight", "query_position_embeddings.weight"),
("transformer.encoder.norm.weight", "encoder.layernorm.weight"),
("transformer.encoder.norm.bias", "encoder.layernorm.bias"),
("transformer.decoder.norm.weight", "decoder.layernorm.weight"),
("transformer.decoder.norm.bias", "decoder.layernorm.bias"),
("class_embed.weight", "class_labels_classifier.weight"),
("class_embed.bias", "class_labels_classifier.bias"),
("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"),
("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"),
("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"),
("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"),
("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"),
("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"),
]
)
def rename_key(state_dict, old, new):
    val = state_dict.pop(old)
    state_dict[new] = val
def rename_backbone_keys(state_dict):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value
    return new_state_dict
def read_in_q_k_v(state_dict):
    prefix = ""
    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
        # read in weights + bias of input projection layer of cross-attention
        in_proj_weight_cross_attn = state_dict.pop(
            f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight")
        in_proj_bias_cross_attn = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.weight"] = in_proj_weight_cross_attn[:256, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.bias"] = in_proj_bias_cross_attn[:256]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.weight"] = in_proj_weight_cross_attn[256:512, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.bias"] = in_proj_bias_cross_attn[256:512]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.weight"] = in_proj_weight_cross_attn[-256:, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.bias"] = in_proj_bias_cross_attn[-256:]
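# --- Added note: why the slicing above works. PyTorch's MultiheadAttention
# stores the query/key/value projections stacked in a single `in_proj` matrix;
# the HF model keeps separate q/k/v projections, so the (3*256, 256) matrix is
# cut into three (256, 256) blocks. A self-contained check (uses the module's
# torch import; sizes mirror the model's hidden size of 256):
def demo_in_proj_split():
    in_proj_weight = torch.randn(3 * 256, 256)  # stacked [q; k; v]
    q = in_proj_weight[:256, :]
    k = in_proj_weight[256:512, :]
    v = in_proj_weight[-256:, :]
    assert torch.equal(torch.cat([q, k, v]), in_proj_weight)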
def resize(image, checkpoint_url):
    width, height = image.size
    current_max_size = max(width, height)
    target_max_size = 800 if "detection" in checkpoint_url else 1000
    scale = target_max_size / current_max_size
    resized_image = image.resize((int(round(scale * width)), int(round(scale * height))))
    return resized_image
def normalize(image):
    image = F.to_tensor(image)
    # standard ImageNet mean/std normalization
    image = F.normalize(image, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    return image
@torch.no_grad()
def convert_table_transformer_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub):
    logger.info("Converting model...")
    # load original state dict
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    # rename keys
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    state_dict = rename_backbone_keys(state_dict)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "model."
    for key in state_dict.copy().keys():
        if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
            val = state_dict.pop(key)
            state_dict[prefix + key] = val
    # create HuggingFace model and load state dict
    config = TableTransformerConfig(
        backbone="resnet18", mask_loss_coefficient=1, dice_loss_coefficient=1, ce_loss_coefficient=1, bbox_loss_coefficient=5, giou_loss_coefficient=2, eos_coefficient=0.4, class_cost=1, bbox_cost=5, giou_cost=2, )
    if "detection" in checkpoint_url:
        config.num_queries = 15
        config.num_labels = 2
        id2label = {0: "table", 1: "table rotated"}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    else:
        config.num_queries = 125
        config.num_labels = 6
        id2label = {
            0: "table",
            1: "table column",
            2: "table row",
            3: "table column header",
            4: "table projected row header",
            5: "table spanning cell",
        }
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    image_processor = DetrImageProcessor(
        format="coco_detection", max_size=800 if "detection" in checkpoint_url else 1000)
    model = TableTransformerForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    # verify our conversion
    file_name = "example_pdf.png" if "detection" in checkpoint_url else "example_table.png"
    file_path = hf_hub_download(repo_id="nielsr/example-pdf", repo_type="dataset", filename=file_name)
    image = Image.open(file_path).convert("RGB")
    pixel_values = normalize(resize(image, checkpoint_url)).unsqueeze(0)
    outputs = model(pixel_values)
    if "detection" in checkpoint_url:
        expected_shape = (1, 15, 3)
        expected_logits = torch.tensor(
            [[-6.7897, -16.9985, 6.7937], [-8.0186, -22.2192, 6.9677], [-7.3117, -21.0708, 7.4055]])
        expected_boxes = torch.tensor([[0.4867, 0.1767, 0.6732], [0.6718, 0.4479, 0.3830], [0.4716, 0.1760, 0.6364]])
    else:
        expected_shape = (1, 125, 7)
        expected_logits = torch.tensor(
            [[-18.1430, -8.3214, 4.8274], [-18.4685, -7.1361, -4.2667], [-26.3693, -9.3429, -4.9962]])
        expected_boxes = torch.tensor([[0.4983, 0.5595, 0.9440], [0.4916, 0.6315, 0.5954], [0.6108, 0.8637, 0.1135]])
    assert outputs.logits.shape == expected_shape
    assert torch.allclose(outputs.logits[0, :3, :3], expected_logits, atol=1e-4)
    assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes, atol=1e-4)
    print("Looks ok!")
    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        image_processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        # Push model to HF hub
        logger.info("Pushing model to the hub...")
        model_name = (
            "microsoft/table-transformer-detection"
            if "detection" in checkpoint_url
            else "microsoft/table-transformer-structure-recognition"
        )
        model.push_to_hub(model_name)
        image_processor.push_to_hub(model_name)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_url",
default="https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth",
type=str,
choices=[
"https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth",
"https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth",
],
help="URL of the Table Transformer checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
    args = parser.parse_args()
convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
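    # Example invocation (the script filename and output folder below are illustrative):
    #   python convert_table_transformer_checkpoint.py \
    #       --checkpoint_url https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth \
    #       --pytorch_dump_folder_path ./table-transformer-detection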
| 680
| 0
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''facebook/xmod-base''': '''https://huggingface.co/facebook/xmod-base/resolve/main/config.json''',
'''facebook/xmod-large-prenorm''': '''https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json''',
'''facebook/xmod-base-13-125k''': '''https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json''',
'''facebook/xmod-base-30-125k''': '''https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json''',
'''facebook/xmod-base-30-195k''': '''https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json''',
'''facebook/xmod-base-60-125k''': '''https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json''',
'''facebook/xmod-base-60-265k''': '''https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json''',
'''facebook/xmod-base-75-125k''': '''https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json''',
'''facebook/xmod-base-75-269k''': '''https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json''',
}
class XmodConfig(PretrainedConfig):
    '''simple docstring'''
    model_type = "xmod"
    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute", use_cache=True, classifier_dropout=None, pre_norm=False, adapter_reduction_factor=2, adapter_layer_norm=False, adapter_reuse_layer_norm=True, ln_before_adapter=True, languages=("en_XX",), default_language=None, **kwargs) -> None:
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        self.pre_norm = pre_norm
        self.adapter_reduction_factor = adapter_reduction_factor
        self.adapter_layer_norm = adapter_layer_norm
        self.adapter_reuse_layer_norm = adapter_reuse_layer_norm
        self.ln_before_adapter = ln_before_adapter
        self.languages = list(languages)
        self.default_language = default_language
class XmodOnnxConfig(OnnxConfig):
    '''simple docstring'''

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        '''simple docstring'''
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ])
| 712
|
'''simple docstring'''
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class AlbertModelTester:
    '''simple docstring'''
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, embedding_size=16, hidden_size=36, num_hidden_layers=6, num_hidden_groups=6, num_attention_heads=6, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
return AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , num_hidden_groups=self.num_hidden_groups , )
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = AlbertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
    def create_and_check_for_pretraining(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = AlbertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels, sentence_order_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.sop_logits.shape, (self.batch_size, config.num_labels))
    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = AlbertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = AlbertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = AlbertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = AlbertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = AlbertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class AlbertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            AlbertModel,
            AlbertForPreTraining,
            AlbertForMaskedLM,
            AlbertForMultipleChoice,
            AlbertForSequenceClassification,
            AlbertForTokenClassification,
            AlbertForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": AlbertModel,
            "fill-mask": AlbertForMaskedLM,
            "question-answering": AlbertForQuestionAnswering,
            "text-classification": AlbertForSequenceClassification,
            "token-classification": AlbertForTokenClassification,
            "zero-shot": AlbertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device)
                inputs_dict["sentence_order_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device)
        return inputs_dict
    def setUp(self):
        self.model_tester = AlbertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=AlbertConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)
    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)
    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)
    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)
    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)
    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = AlbertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class AlbertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = AlbertModel.from_pretrained("albert-base-v2")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]])
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
| 265
| 0
|
import math
from collections.abc import Callable
def intersection(function: Callable[[float], float], x0: float, x1: float) -> float:
    '''simple docstring'''
    x_n = x0
    x_n1 = x1
    while True:
        if x_n == x_n1 or function(x_n1) == function(x_n):
            raise ZeroDivisionError('float division by zero, could not find root')
        # secant step: move to where the line through (x_n, f(x_n)) and (x_n1, f(x_n1)) crosses zero
        x_n2 = x_n1 - (
            function(x_n1) / ((function(x_n1) - function(x_n)) / (x_n1 - x_n))
        )
        if abs(x_n2 - x_n1) < 10**-5:
            return x_n2
        x_n = x_n1
        x_n1 = x_n2


def f(x: float) -> float:
    '''simple docstring'''
    return math.pow(x, 3) - (2 * x) - 5
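# note: the real root of x**3 - 2*x - 5 is approximately 2.0946, so intersection(f, 3, 3.5) converges there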
if __name__ == "__main__":
print(intersection(f, 3, 3.5))
| 439
|
import base64


def base85_encode(string: str) -> bytes:
    '''simple docstring'''
    return base64.a85encode(string.encode('utf-8'))


def base85_decode(a85encoded: bytes) -> str:
    '''simple docstring'''
    return base64.a85decode(a85encoded).decode('utf-8')
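# example round trip: base85_encode("Hello") == b"87cURDZ" and base85_decode(b"87cURDZ") == "Hello"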
if __name__ == "__main__":
import doctest
doctest.testmod()
| 439
| 1
|
from copy import deepcopy
class FenwickTree:
    """simple docstring"""

    def __init__(self, arr: list[int] | None = None, size: int | None = None):
        if arr is None and size is not None:
            self.size = size
            self.tree = [0] * size
        elif arr is not None:
            self.init(arr)
        else:
            raise ValueError("Either arr or size must be specified")

    def init(self, arr: list[int]) -> None:
        # O(n) construction: copy the array, then push each partial sum to its parent
        self.size = len(arr)
        self.tree = deepcopy(arr)
        for i in range(1, self.size):
            j = self.next_(i)
            if j < self.size:
                self.tree[j] += self.tree[i]

    def get_array(self) -> list[int]:
        # inverse of init(): recover the underlying array from the tree
        arr = self.tree[:]
        for i in range(self.size - 1, 0, -1):
            j = self.next_(i)
            if j < self.size:
                arr[j] -= arr[i]
        return arr

    @staticmethod
    def next_(index: int) -> int:
        return index + (index & (-index))

    @staticmethod
    def prev(index: int) -> int:
        return index - (index & (-index))

    def add(self, index: int, value: int) -> None:
        # point update in O(log n): add `value` at `index`
        if index == 0:
            self.tree[0] += value
            return
        while index < self.size:
            self.tree[index] += value
            index = self.next_(index)

    def update(self, index: int, value: int) -> None:
        # set arr[index] = value
        self.add(index, value - self.get(index))

    def prefix(self, right: int) -> int:
        # sum of arr[0:right] in O(log n)
        if right == 0:
            return 0
        result = self.tree[0]
        right -= 1  # make right inclusive
        while right > 0:
            result += self.tree[right]
            right = self.prev(right)
        return result

    def query(self, left: int, right: int) -> int:
        return self.prefix(right) - self.prefix(left)

    def get(self, index: int) -> int:
        return self.query(index, index + 1)

    def rank_query(self, value: int) -> int:
        # binary-lifting search: largest index whose prefix sum does not exceed `value`
        value -= self.tree[0]
        if value < 0:
            return -1
        j = 1  # largest power of 2 <= size
        while j * 2 < self.size:
            j *= 2
        i = 0
        while j > 0:
            if i + j < self.size and self.tree[i + j] <= value:
                value -= self.tree[i + j]
                i += j
            j //= 2
        return i
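
# Minimal usage sketch for the class above (values chosen for illustration):
#   f = FenwickTree([1, 2, 3, 4, 5])
#   f.prefix(3)    # 6, the sum of the first three values
#   f.add(0, 10)   # arr[0] becomes 11
#   f.query(0, 2)  # 13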
if __name__ == "__main__":
import doctest
doctest.testmod()
| 151
|
from queue import PriorityQueue
from typing import Any
import numpy as np
def pass_and_relaxation(
    graph: dict,
    v: str,
    visited_forward: set,
    visited_backward: set,
    cst_fwd: dict,
    cst_bwd: dict,
    queue: PriorityQueue,
    parent: dict,
    shortest_distance: float | int,
) -> float | int:
    for nxt, d in graph[v]:
        if nxt in visited_forward:
            continue
        old_cost_f = cst_fwd.get(nxt, np.inf)
        new_cost_f = cst_fwd[v] + d
        if new_cost_f < old_cost_f:
            queue.put((new_cost_f, nxt))
            cst_fwd[nxt] = new_cost_f
            parent[nxt] = v
        if nxt in visited_backward:
            # the two frontiers touch: candidate shortest path through edge (v, nxt)
            if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
                shortest_distance = cst_fwd[v] + d + cst_bwd[nxt]
    return shortest_distance


def bidirectional_dij(source: str, destination: str, graph_forward: dict, graph_backward: dict) -> int:
    shortest_path_distance = -1
    visited_forward = set()
    visited_backward = set()
    cst_fwd = {source: 0}
    cst_bwd = {destination: 0}
    parent_forward = {source: None}
    parent_backward = {destination: None}
    queue_forward: PriorityQueue[Any] = PriorityQueue()
    queue_backward: PriorityQueue[Any] = PriorityQueue()
    shortest_distance = np.inf
    queue_forward.put((0, source))
    queue_backward.put((0, destination))
    if source == destination:
        return 0
    while not queue_forward.empty() and not queue_backward.empty():
        _, v_fwd = queue_forward.get()
        visited_forward.add(v_fwd)
        _, v_bwd = queue_backward.get()
        visited_backward.add(v_bwd)
        shortest_distance = pass_and_relaxation(
            graph_forward, v_fwd, visited_forward, visited_backward,
            cst_fwd, cst_bwd, queue_forward, parent_forward, shortest_distance,
        )
        shortest_distance = pass_and_relaxation(
            graph_backward, v_bwd, visited_backward, visited_forward,
            cst_bwd, cst_fwd, queue_backward, parent_backward, shortest_distance,
        )
        # stop once the frontiers' combined cost can no longer improve the best path found
        if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
            break
    if shortest_distance != np.inf:
        shortest_path_distance = shortest_distance
    return shortest_path_distance
graph_fwd = {
"""B""": [["""C""", 1]],
"""C""": [["""D""", 1]],
"""D""": [["""F""", 1]],
"""E""": [["""B""", 1], ["""G""", 2]],
"""F""": [],
"""G""": [["""F""", 1]],
}
graph_bwd = {
"""B""": [["""E""", 1]],
"""C""": [["""B""", 1]],
"""D""": [["""C""", 1]],
"""F""": [["""D""", 1], ["""G""", 1]],
"""E""": [[None, np.inf]],
"""G""": [["""E""", 2]],
}
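
# usage sketch: bidirectional_dij("E", "F", graph_fwd, graph_bwd) returns 3 (path E -> G -> F)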
if __name__ == "__main__":
import doctest
doctest.testmod()
| 151
| 1
|
'''simple docstring'''
# limitations under the License.
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that `from diffusers.pipelines import DiffusionPipeline` temporarily keeps working
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import deprecate
deprecate(
"pipelines_utils",
"0.22.0",
"Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.",
standard_warn=False,
stacklevel=3,
)
| 270
|
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile
from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version
lowerCamelCase__ = get_logger(__name__)
class UpperCamelCase :
__UpperCamelCase = """dummy_data"""
__UpperCamelCase = """datasets"""
__UpperCamelCase = False
def __init__( self : Dict ,_lowerCAmelCase : str ,_lowerCAmelCase : str ,_lowerCAmelCase : Union[Version, str] ,_lowerCAmelCase : Optional[str] = None ,_lowerCAmelCase : bool = False ,_lowerCAmelCase : bool = True ,_lowerCAmelCase : Optional[List[Callable]] = None ,):
"""simple docstring"""
__snake_case = 0
__snake_case = dataset_name
__snake_case = cache_dir
__snake_case = use_local_dummy_data
__snake_case = config
# download_callbacks take a single url as input
__snake_case = download_callbacks or []
# if False, it doesn't load existing files and it returns the paths of the dummy files relative
# to the dummy_data zip file root
__snake_case = load_existing_dummy_data
# TODO(PVP, QL) might need to make this more general
__snake_case = str(_lowerCAmelCase )
# to be downloaded
__snake_case = None
__snake_case = None
@property
def UpperCamelCase_ ( self : str ):
"""simple docstring"""
if self._dummy_file is None:
__snake_case = self.download_dummy_data()
return self._dummy_file
@property
def UpperCamelCase_ ( self : Tuple ):
"""simple docstring"""
if self.config is not None:
# structure is dummy / config_name / version_name
return os.path.join("dummy" ,self.config.name ,self.version_name )
# structure is dummy / version_name
return os.path.join("dummy" ,self.version_name )
@property
def UpperCamelCase_ ( self : List[Any] ):
"""simple docstring"""
return os.path.join(self.dummy_data_folder ,"dummy_data.zip" )
def UpperCamelCase_ ( self : List[str] ):
"""simple docstring"""
__snake_case = (
self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
)
__snake_case = cached_path(
_lowerCAmelCase ,cache_dir=self.cache_dir ,extract_compressed_file=_lowerCAmelCase ,force_extract=_lowerCAmelCase )
return os.path.join(_lowerCAmelCase ,self.dummy_file_name )
@property
def UpperCamelCase_ ( self : Dict ):
"""simple docstring"""
return os.path.join(self.datasets_scripts_dir ,self.dataset_name ,self.dummy_zip_file )
@property
def UpperCamelCase_ ( self : str ):
"""simple docstring"""
if self._bucket_url is None:
__snake_case = hf_github_url(self.dataset_name ,self.dummy_zip_file.replace(os.sep ,"/" ) )
return self._bucket_url
@property
def UpperCamelCase_ ( self : Optional[int] ):
"""simple docstring"""
if os.path.isdir(self.dummy_file ):
return self.dummy_file
# else cut off path to file -> example `xsum`.
return "/".join(self.dummy_file.replace(os.sep ,"/" ).split("/" )[:-1] )
def UpperCamelCase_ ( self : str ,_lowerCAmelCase : List[Any] ,*_lowerCAmelCase : Optional[int] ):
"""simple docstring"""
if self.load_existing_dummy_data:
# dummy data is downloaded and tested
__snake_case = self.dummy_file
else:
# dummy data cannot be downloaded and only the path to dummy file is returned
__snake_case = self.dummy_file_name
# special case when data_url is a dict
if isinstance(_lowerCAmelCase ,_lowerCAmelCase ):
return self.create_dummy_data_dict(_lowerCAmelCase ,_lowerCAmelCase )
elif isinstance(_lowerCAmelCase ,(list, tuple) ):
return self.create_dummy_data_list(_lowerCAmelCase ,_lowerCAmelCase )
else:
return self.create_dummy_data_single(_lowerCAmelCase ,_lowerCAmelCase )
def UpperCamelCase_ ( self : Dict ,_lowerCAmelCase : str ,*_lowerCAmelCase : Dict ):
"""simple docstring"""
return self.download_and_extract(_lowerCAmelCase )
def UpperCamelCase_ ( self : int ,_lowerCAmelCase : Optional[int] ,_lowerCAmelCase : Optional[Any] ):
"""simple docstring"""
return self.download_and_extract(_lowerCAmelCase )
def UpperCamelCase_ ( self : List[str] ,_lowerCAmelCase : str ,*_lowerCAmelCase : List[str] ,**_lowerCAmelCase : List[Any] ):
"""simple docstring"""
return path
def UpperCamelCase_ ( self : Optional[int] ):
"""simple docstring"""
return {}
def UpperCamelCase_ ( self : str ,_lowerCAmelCase : int ,_lowerCAmelCase : Optional[int] ):
"""simple docstring"""
__snake_case = {}
for key, single_urls in data_url.items():
for download_callback in self.download_callbacks:
if isinstance(_lowerCAmelCase ,_lowerCAmelCase ):
for single_url in single_urls:
download_callback(_lowerCAmelCase )
else:
__snake_case = single_urls
download_callback(_lowerCAmelCase )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
if isinstance(_lowerCAmelCase ,_lowerCAmelCase ):
__snake_case = [os.path.join(_lowerCAmelCase ,urllib.parse.quote_plus(Path(_lowerCAmelCase ).name ) ) for x in single_urls]
else:
__snake_case = single_urls
__snake_case = os.path.join(_lowerCAmelCase ,urllib.parse.quote_plus(Path(_lowerCAmelCase ).name ) )
__snake_case = value
# make sure that values are unique
if all(isinstance(_lowerCAmelCase ,_lowerCAmelCase ) for i in dummy_data_dict.values() ) and len(set(dummy_data_dict.values() ) ) < len(
dummy_data_dict.values() ):
# append key to value to make its name unique
__snake_case = {key: value + key for key, value in dummy_data_dict.items()}
return dummy_data_dict
def UpperCamelCase_ ( self : List[str] ,_lowerCAmelCase : str ,_lowerCAmelCase : Any ):
"""simple docstring"""
__snake_case = []
# trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
__snake_case = all(bool(re.findall("[0-9]{3,}-of-[0-9]{3,}" ,_lowerCAmelCase ) ) for url in data_url )
__snake_case = all(
url.startswith("https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed" ) for url in data_url )
if data_url and (is_tf_records or is_pubmed_records):
__snake_case = [data_url[0]] * len(_lowerCAmelCase )
for single_url in data_url:
for download_callback in self.download_callbacks:
download_callback(_lowerCAmelCase )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
__snake_case = os.path.join(_lowerCAmelCase ,urllib.parse.quote_plus(single_url.split("/" )[-1] ) )
dummy_data_list.append(_lowerCAmelCase )
return dummy_data_list
def UpperCamelCase_ ( self : Tuple ,_lowerCAmelCase : Any ,_lowerCAmelCase : Union[str, Any] ):
"""simple docstring"""
for download_callback in self.download_callbacks:
download_callback(_lowerCAmelCase )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
__snake_case = os.path.join(_lowerCAmelCase ,urllib.parse.quote_plus(data_url.split("/" )[-1] ) )
if os.path.exists(_lowerCAmelCase ) or not self.load_existing_dummy_data:
return value
else:
# Backward compatibility, maybe deprecate at one point.
# For many datasets with single url calls to dl_manager.download_and_extract,
# the dummy_data.zip file is actually the zipped downloaded file
# while now we expected the dummy_data.zip file to be a directory containing
# the downloaded file.
return path_to_dummy_data
def UpperCamelCase_ ( self : Any ):
"""simple docstring"""
pass
def UpperCamelCase_ ( self : Dict ):
"""simple docstring"""
pass
def UpperCamelCase_ ( self : Tuple ,_lowerCAmelCase : List[str] ):
"""simple docstring"""
def _iter_archive_members(_lowerCAmelCase : Tuple ):
# this preserves the order of the members inside the ZIP archive
__snake_case = Path(self.dummy_file ).parent
__snake_case = path.relative_to(_lowerCAmelCase )
with ZipFile(self.local_path_to_dummy_data ) as zip_file:
__snake_case = zip_file.namelist()
for member in members:
if member.startswith(relative_path.as_posix() ):
yield dummy_parent_path.joinpath(_lowerCAmelCase )
__snake_case = Path(_lowerCAmelCase )
__snake_case = _iter_archive_members(_lowerCAmelCase ) if self.use_local_dummy_data else path.rglob("*" )
for file_path in file_paths:
if file_path.is_file() and not file_path.name.startswith((".", "__") ):
yield file_path.relative_to(_lowerCAmelCase ).as_posix(), file_path.open("rb" )
def UpperCamelCase_ ( self : List[str] ,_lowerCAmelCase : Tuple ):
"""simple docstring"""
if not isinstance(_lowerCAmelCase ,_lowerCAmelCase ):
__snake_case = [paths]
for path in paths:
if os.path.isfile(_lowerCAmelCase ):
if os.path.basename(_lowerCAmelCase ).startswith((".", "__") ):
return
yield path
else:
for dirpath, dirnames, filenames in os.walk(_lowerCAmelCase ):
if os.path.basename(_lowerCAmelCase ).startswith((".", "__") ):
continue
dirnames.sort()
for filename in sorted(_lowerCAmelCase ):
if filename.startswith((".", "__") ):
continue
yield os.path.join(_lowerCAmelCase ,_lowerCAmelCase )
| 524
| 0
|
'''simple docstring'''
import unittest
from datasets import load_dataset
from transformers import BloomTokenizerFast
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowercase ( a_, unittest.TestCase ):
_lowerCamelCase : Optional[Any]= None
_lowerCamelCase : Any= BloomTokenizerFast
_lowerCamelCase : Optional[Any]= BloomTokenizerFast
_lowerCamelCase : List[Any]= True
_lowerCamelCase : Tuple= False
_lowerCamelCase : List[str]= "tokenizer_file"
_lowerCamelCase : Optional[int]= {"bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "pad_token": "<pad>"}
    def setUp(self):
        super().setUp()
        tokenizer = BloomTokenizerFast.from_pretrained('bigscience/tokenizer')
        tokenizer.save_pretrained(self.tmpdirname)
    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BloomTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)
def _snake_case ( self) -> Union[str, Any]:
UpperCAmelCase_ : List[Any] = self.get_rust_tokenizer()
UpperCAmelCase_ : Optional[int] = ['The quick brown fox</s>', 'jumps over the lazy dog</s>']
UpperCAmelCase_ : Any = [[2175, 2_3714, 7_3173, 14_4252, 2], [77, 13_2619, 3478, 368, 10_9586, 3_5433, 2]]
UpperCAmelCase_ : Union[str, Any] = tokenizer.batch_encode_plus(_lowerCAmelCase)['input_ids']
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase)
UpperCAmelCase_ : Dict = tokenizer.batch_decode(_lowerCAmelCase)
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase)
def _snake_case ( self , _snake_case=6) -> str:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})"""):
UpperCAmelCase_ : Any = self.rust_tokenizer_class.from_pretrained(_lowerCAmelCase , **_lowerCAmelCase)
# tokenizer_r.pad_token = None # Hotfixing padding = None
# Simple input
UpperCAmelCase_ : str = 'This is a simple input'
UpperCAmelCase_ : Tuple = ['This is a simple input 1', 'This is a simple input 2']
UpperCAmelCase_ : List[Any] = ('This is a simple input', 'This is a pair')
UpperCAmelCase_ : Optional[int] = [
('This is a simple input 1', 'This is a simple input 2'),
('This is a simple pair 1', 'This is a simple pair 2'),
]
# Simple input tests
try:
tokenizer_r.encode(_lowerCAmelCase , max_length=_lowerCAmelCase)
tokenizer_r.encode_plus(_lowerCAmelCase , max_length=_lowerCAmelCase)
tokenizer_r.batch_encode_plus(_lowerCAmelCase , max_length=_lowerCAmelCase)
tokenizer_r.encode(_lowerCAmelCase , max_length=_lowerCAmelCase)
tokenizer_r.batch_encode_plus(_lowerCAmelCase , max_length=_lowerCAmelCase)
except ValueError:
self.fail('Bloom Tokenizer should be able to deal with padding')
UpperCAmelCase_ : List[Any] = None # Hotfixing padding = None
self.assertRaises(_lowerCAmelCase , tokenizer_r.encode , _lowerCAmelCase , max_length=_lowerCAmelCase , padding='max_length')
# Simple input
self.assertRaises(_lowerCAmelCase , tokenizer_r.encode_plus , _lowerCAmelCase , max_length=_lowerCAmelCase , padding='max_length')
# Simple input
self.assertRaises(
_lowerCAmelCase , tokenizer_r.batch_encode_plus , _lowerCAmelCase , max_length=_lowerCAmelCase , padding='max_length' , )
# Pair input
self.assertRaises(_lowerCAmelCase , tokenizer_r.encode , _lowerCAmelCase , max_length=_lowerCAmelCase , padding='max_length')
# Pair input
self.assertRaises(_lowerCAmelCase , tokenizer_r.encode_plus , _lowerCAmelCase , max_length=_lowerCAmelCase , padding='max_length')
# Pair input
self.assertRaises(
_lowerCAmelCase , tokenizer_r.batch_encode_plus , _lowerCAmelCase , max_length=_lowerCAmelCase , padding='max_length' , )
def _snake_case ( self) -> int:
UpperCAmelCase_ : Union[str, Any] = self.get_rust_tokenizer()
UpperCAmelCase_ : List[Any] = load_dataset('xnli' , 'all_languages' , split='test' , streaming=_lowerCAmelCase)
UpperCAmelCase_ : Optional[int] = next(iter(_lowerCAmelCase))['premise'] # pick up one data
UpperCAmelCase_ : Any = list(sample_data.values())
UpperCAmelCase_ : List[str] = list(map(tokenizer.encode , _lowerCAmelCase))
UpperCAmelCase_ : List[str] = [tokenizer.decode(_lowerCAmelCase , clean_up_tokenization_spaces=_lowerCAmelCase) for x in output_tokens]
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase)
def _snake_case ( self) -> Optional[Any]:
        # The test has to be overridden because BLOOM uses ALiBi positional embeddings that do not have
        # any sequence length constraints. This test of the parent class will fail since it relies on the
        # maximum sequence length of the positional embeddings.
self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map) , 1)
self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values())[0]) , 1)
| 709
|
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
from .unet_ad_blocks import get_down_block, get_mid_block, get_out_block, get_up_block
@dataclass
class lowercase ( a_ ):
_lowerCamelCase : torch.FloatTensor
class lowercase ( a_, a_ ):
@register_to_config
def __init__( self , _snake_case = 6_5536 , _snake_case = None , _snake_case = 2 , _snake_case = 2 , _snake_case = 0 , _snake_case = "fourier" , _snake_case = True , _snake_case = False , _snake_case = 0.0 , _snake_case = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D") , _snake_case = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip") , _snake_case = "UNetMidBlock1D" , _snake_case = None , _snake_case = (32, 32, 64) , _snake_case = None , _snake_case = 8 , _snake_case = 1 , _snake_case = False , ) -> List[str]:
super().__init__()
UpperCAmelCase_ : Optional[Any] = sample_size
# time
if time_embedding_type == "fourier":
UpperCAmelCase_ : Tuple = GaussianFourierProjection(
embedding_size=8 , set_W_to_weight=_snake_case , log=_snake_case , flip_sin_to_cos=_snake_case)
UpperCAmelCase_ : int = 2 * block_out_channels[0]
elif time_embedding_type == "positional":
UpperCAmelCase_ : Optional[Any] = Timesteps(
block_out_channels[0] , flip_sin_to_cos=_snake_case , downscale_freq_shift=_snake_case)
UpperCAmelCase_ : List[Any] = block_out_channels[0]
if use_timestep_embedding:
UpperCAmelCase_ : Dict = block_out_channels[0] * 4
UpperCAmelCase_ : List[Any] = TimestepEmbedding(
in_channels=_snake_case , time_embed_dim=_snake_case , act_fn=_snake_case , out_dim=block_out_channels[0] , )
UpperCAmelCase_ : int = nn.ModuleList([])
UpperCAmelCase_ : Optional[int] = None
UpperCAmelCase_ : Optional[int] = nn.ModuleList([])
UpperCAmelCase_ : Any = None
# down
UpperCAmelCase_ : Dict = in_channels
for i, down_block_type in enumerate(_snake_case):
UpperCAmelCase_ : int = output_channel
UpperCAmelCase_ : Optional[int] = block_out_channels[i]
if i == 0:
input_channel += extra_in_channels
UpperCAmelCase_ : int = i == len(_snake_case) - 1
UpperCAmelCase_ : Any = get_down_block(
_snake_case , num_layers=_snake_case , in_channels=_snake_case , out_channels=_snake_case , temb_channels=block_out_channels[0] , add_downsample=not is_final_block or downsample_each_block , )
self.down_blocks.append(_snake_case)
# mid
UpperCAmelCase_ : Optional[int] = get_mid_block(
_snake_case , in_channels=block_out_channels[-1] , mid_channels=block_out_channels[-1] , out_channels=block_out_channels[-1] , embed_dim=block_out_channels[0] , num_layers=_snake_case , add_downsample=_snake_case , )
# up
UpperCAmelCase_ : Union[str, Any] = list(reversed(_snake_case))
UpperCAmelCase_ : Optional[int] = reversed_block_out_channels[0]
if out_block_type is None:
UpperCAmelCase_ : Tuple = out_channels
else:
UpperCAmelCase_ : int = block_out_channels[0]
for i, up_block_type in enumerate(_snake_case):
UpperCAmelCase_ : Dict = output_channel
UpperCAmelCase_ : Optional[Any] = (
reversed_block_out_channels[i + 1] if i < len(_snake_case) - 1 else final_upsample_channels
)
UpperCAmelCase_ : str = i == len(_snake_case) - 1
UpperCAmelCase_ : Union[str, Any] = get_up_block(
_snake_case , num_layers=_snake_case , in_channels=_snake_case , out_channels=_snake_case , temb_channels=block_out_channels[0] , add_upsample=not is_final_block , )
self.up_blocks.append(_snake_case)
UpperCAmelCase_ : Dict = output_channel
# out
UpperCAmelCase_ : Union[str, Any] = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4 , 32)
UpperCAmelCase_ : Any = get_out_block(
out_block_type=_snake_case , num_groups_out=_snake_case , embed_dim=block_out_channels[0] , out_channels=_snake_case , act_fn=_snake_case , fc_dim=block_out_channels[-1] // 4 , )
def _snake_case ( self , _snake_case , _snake_case , _snake_case = True , ) -> Union[UNetaDOutput, Tuple]:
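        # 1. time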
UpperCAmelCase_ : Union[str, Any] = timestep
if not torch.is_tensor(_snake_case):
UpperCAmelCase_ : Union[str, Any] = torch.tensor([timesteps] , dtype=torch.long , device=sample.device)
elif torch.is_tensor(_snake_case) and len(timesteps.shape) == 0:
UpperCAmelCase_ : Tuple = timesteps[None].to(sample.device)
UpperCAmelCase_ : Any = self.time_proj(_snake_case)
if self.config.use_timestep_embedding:
UpperCAmelCase_ : int = self.time_mlp(_snake_case)
else:
UpperCAmelCase_ : int = timestep_embed[..., None]
UpperCAmelCase_ : List[Any] = timestep_embed.repeat([1, 1, sample.shape[2]]).to(sample.dtype)
UpperCAmelCase_ : int = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]))
# 2. down
UpperCAmelCase_ : Optional[Any] = ()
for downsample_block in self.down_blocks:
UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = downsample_block(hidden_states=_snake_case , temb=_snake_case)
down_block_res_samples += res_samples
# 3. mid
if self.mid_block:
UpperCAmelCase_ : List[Any] = self.mid_block(_snake_case , _snake_case)
# 4. up
for i, upsample_block in enumerate(self.up_blocks):
UpperCAmelCase_ : int = down_block_res_samples[-1:]
UpperCAmelCase_ : Tuple = down_block_res_samples[:-1]
UpperCAmelCase_ : List[Any] = upsample_block(_snake_case , res_hidden_states_tuple=_snake_case , temb=_snake_case)
# 5. post-process
if self.out_block:
UpperCAmelCase_ : Optional[Any] = self.out_block(_snake_case , _snake_case)
if not return_dict:
return (sample,)
return UNetaDOutput(sample=_snake_case)
| 471
| 0
|
'''simple docstring'''
import unittest
from transformers import AutoConfig, AutoTokenizer, BertConfig, TensorType, is_flax_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, slow
if is_flax_available():
import jax
from transformers.models.auto.modeling_flax_auto import FlaxAutoModel
from transformers.models.bert.modeling_flax_bert import FlaxBertModel
from transformers.models.roberta.modeling_flax_roberta import FlaxRobertaModel
@require_flax
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
@slow
def lowerCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
for model_name in ["bert-base-cased", "bert-large-uncased"]:
with self.subTest(_SCREAMING_SNAKE_CASE ):
UpperCAmelCase__ : Tuple = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
UpperCAmelCase__ : Optional[Any] = FlaxAutoModel.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
@slow
def lowerCAmelCase__ ( self ) -> Optional[Any]:
'''simple docstring'''
for model_name in ["roberta-base", "roberta-large"]:
with self.subTest(_SCREAMING_SNAKE_CASE ):
UpperCAmelCase__ : Tuple = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
UpperCAmelCase__ : Optional[Any] = FlaxAutoModel.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
@slow
def lowerCAmelCase__ ( self ) -> Optional[Any]:
'''simple docstring'''
for model_name in ["bert-base-cased", "bert-large-uncased"]:
UpperCAmelCase__ : int = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE )
UpperCAmelCase__ : int = FlaxBertModel.from_pretrained(_SCREAMING_SNAKE_CASE )
UpperCAmelCase__ : str = tokenizer('''Do you support jax jitted function?''' ,return_tensors=TensorType.JAX )
@jax.jit
def eval(**lowerCamelCase_ ):
return model(**_SCREAMING_SNAKE_CASE )
eval(**_SCREAMING_SNAKE_CASE ).block_until_ready()
@slow
def lowerCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
for model_name in ["roberta-base", "roberta-large"]:
UpperCAmelCase__ : Union[str, Any] = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE )
UpperCAmelCase__ : int = FlaxRobertaModel.from_pretrained(_SCREAMING_SNAKE_CASE )
UpperCAmelCase__ : Dict = tokenizer('''Do you support jax jitted function?''' ,return_tensors=TensorType.JAX )
@jax.jit
def eval(**lowerCamelCase_ ):
return model(**_SCREAMING_SNAKE_CASE )
eval(**_SCREAMING_SNAKE_CASE ).block_until_ready()
def lowerCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
with self.assertRaisesRegex(
_SCREAMING_SNAKE_CASE ,'''bert-base is not a local folder and is not a valid model identifier''' ):
UpperCAmelCase__ : Optional[Any] = FlaxAutoModel.from_pretrained('''bert-base''' )
def lowerCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
with self.assertRaisesRegex(
_SCREAMING_SNAKE_CASE ,r'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
UpperCAmelCase__ : int = FlaxAutoModel.from_pretrained(_SCREAMING_SNAKE_CASE ,revision='''aaaaaa''' )
def lowerCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
with self.assertRaisesRegex(
_SCREAMING_SNAKE_CASE ,'''hf-internal-testing/config-no-model does not appear to have a file named flax_model.msgpack''' ,):
UpperCAmelCase__ : List[Any] = FlaxAutoModel.from_pretrained('''hf-internal-testing/config-no-model''' )
def lowerCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
with self.assertRaisesRegex(_SCREAMING_SNAKE_CASE ,'''Use `from_pt=True` to load this model''' ):
UpperCAmelCase__ : Optional[Any] = FlaxAutoModel.from_pretrained('''hf-internal-testing/tiny-bert-pt-only''' )
| 614
|
"""simple docstring"""
def alternative_string_arrange(first_str: str, second_str: str) -> str:
    first_str_length: int = len(first_str)
    second_str_length: int = len(second_str)
    abs_length: int = (
        first_str_length if first_str_length > second_str_length else second_str_length
    )
    output_list: list = []
    for char_count in range(abs_length):
        if char_count < first_str_length:
            output_list.append(first_str[char_count])
        if char_count < second_str_length:
            output_list.append(second_str[char_count])
    return "".join(output_list)
if __name__ == "__main__":
print(alternative_string_arrange('AB', 'XYZ'), end=' ')
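    # prints: AXBYZ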
| 473
| 0
|
'''simple docstring'''
from __future__ import annotations
from functools import lru_cache
from math import ceil
NUM_PRIMES = 100

# sieve of Eratosthenes over the odd numbers below NUM_PRIMES
primes = set(range(3, NUM_PRIMES, 2))
primes.add(2)
prime: int
for prime in range(3, ceil(NUM_PRIMES**0.5), 2):
    if prime not in primes:
        continue
    primes.difference_update(set(range(prime * prime, NUM_PRIMES, prime)))
@lru_cache(maxsize=100)
def partition(number_to_partition: int) -> set[int]:
    """simple docstring"""
    if number_to_partition < 0:
        return set()
    elif number_to_partition == 0:
        return {1}

    ret: set[int] = set()
    # each prime partition is represented by the product of its primes,
    # so distinct partitions map to distinct set elements
    for prime in primes:
        if prime > number_to_partition:
            continue
        for sub in partition(number_to_partition - prime):
            ret.add(sub * prime)

    return ret


def solution(number_unique_partitions: int = 5000) -> int | None:
    """simple docstring"""
    for number_to_partition in range(1, NUM_PRIMES):
        if len(partition(number_to_partition)) > number_unique_partitions:
            return number_to_partition
    return None
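
# sanity check: partition(10) has 5 elements, matching the five prime partitions of 10
# (2+2+2+2+2, 2+2+3+3, 2+3+5, 3+7, 5+5)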
if __name__ == "__main__":
print(F"{solution() = }")
| 706
|
'''simple docstring'''
from collections.abc import Sequence
def max_subarray_sum(arr: Sequence[float], allow_empty_subarrays: bool = False) -> float:
    """simple docstring"""
    if not arr:
        return 0

    max_sum = 0 if allow_empty_subarrays else float("-inf")
    curr_sum = 0.0
    for num in arr:
        # Kadane's update: either extend the running subarray or start fresh at `num`
        curr_sum = max(0 if allow_empty_subarrays else num, curr_sum + num)
        max_sum = max(max_sum, curr_sum)

    return max_sum
if __name__ == "__main__":
from doctest import testmod
testmod()
    nums = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
print(F"{max_subarray_sum(nums) = }")
| 499
| 0
|
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class lowerCAmelCase_( unittest.TestCase ):
'''simple docstring'''
def __init__( self ,__UpperCAmelCase ,__UpperCAmelCase=7 ,__UpperCAmelCase=3 ,__UpperCAmelCase=18 ,__UpperCAmelCase=30 ,__UpperCAmelCase=400 ,__UpperCAmelCase=True ,__UpperCAmelCase=None ,__UpperCAmelCase=True ,) -> Tuple:
lowerCAmelCase__ : List[Any] = size if size is not None else {"""height""": 18, """width""": 18}
lowerCAmelCase__ : Optional[int] = parent
lowerCAmelCase__ : Optional[Any] = batch_size
lowerCAmelCase__ : int = num_channels
lowerCAmelCase__ : Any = image_size
lowerCAmelCase__ : List[str] = min_resolution
lowerCAmelCase__ : Dict = max_resolution
lowerCAmelCase__ : Dict = do_resize
lowerCAmelCase__ : Any = size
lowerCAmelCase__ : Tuple = apply_ocr
def UpperCAmelCase_ ( self ) -> Dict:
return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class lowerCAmelCase_( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
'''simple docstring'''
__lowercase : Tuple = LayoutLMvaImageProcessor if is_pytesseract_available() else None
def UpperCAmelCase_ ( self ) -> int:
lowerCAmelCase__ : Union[str, Any] = LayoutLMvaImageProcessingTester(self )
@property
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCAmelCase_ ( self ) -> int:
lowerCAmelCase__ : int = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__UpperCAmelCase ,"""do_resize""" ) )
self.assertTrue(hasattr(__UpperCAmelCase ,"""size""" ) )
self.assertTrue(hasattr(__UpperCAmelCase ,"""apply_ocr""" ) )
def UpperCAmelCase_ ( self ) -> str:
lowerCAmelCase__ : Optional[Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size ,{"""height""": 18, """width""": 18} )
lowerCAmelCase__ : int = self.image_processing_class.from_dict(self.image_processor_dict ,size=42 )
self.assertEqual(image_processor.size ,{"""height""": 42, """width""": 42} )
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoding = image_processing(image_inputs[0], return_tensors="pt")
        self.assertEqual(
            encoding.pixel_values.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
        self.assertIsInstance(encoding.words, list)
        self.assertIsInstance(encoding.boxes, list)

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    def test_layoutlmv3_integration_test(self):
        # with apply_OCR = True
        image_processing = LayoutLMv3ImageProcessor()

        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/fixtures_docvqa", split="test")
        image = Image.open(ds[0]["file"]).convert("RGB")
        encoding = image_processing(image, return_tensors="pt")
        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
        self.assertEqual(len(encoding.words), len(encoding.boxes))
        # fmt: off
        # the words and boxes were obtained with Tesseract 4.1.1
lowerCAmelCase__ : int = [["""11:14""", """to""", """11:39""", """a.m""", """11:39""", """to""", """11:44""", """a.m.""", """11:44""", """a.m.""", """to""", """12:25""", """p.m.""", """12:25""", """to""", """12:58""", """p.m.""", """12:58""", """to""", """4:00""", """p.m.""", """2:00""", """to""", """5:00""", """p.m.""", """Coffee""", """Break""", """Coffee""", """will""", """be""", """served""", """for""", """men""", """and""", """women""", """in""", """the""", """lobby""", """adjacent""", """to""", """exhibit""", """area.""", """Please""", """move""", """into""", """exhibit""", """area.""", """(Exhibits""", """Open)""", """TRRF""", """GENERAL""", """SESSION""", """(PART""", """|)""", """Presiding:""", """Lee""", """A.""", """Waller""", """TRRF""", """Vice""", """President""", """“Introductory""", """Remarks”""", """Lee""", """A.""", """Waller,""", """TRRF""", """Vice""", """Presi-""", """dent""", """Individual""", """Interviews""", """with""", """TRRF""", """Public""", """Board""", """Members""", """and""", """Sci-""", """entific""", """Advisory""", """Council""", """Mem-""", """bers""", """Conducted""", """by""", """TRRF""", """Treasurer""", """Philip""", """G.""", """Kuehn""", """to""", """get""", """answers""", """which""", """the""", """public""", """refrigerated""", """warehousing""", """industry""", """is""", """looking""", """for.""", """Plus""", """questions""", """from""", """the""", """floor.""", """Dr.""", """Emil""", """M.""", """Mrak,""", """University""", """of""", """Cal-""", """ifornia,""", """Chairman,""", """TRRF""", """Board;""", """Sam""", """R.""", """Cecil,""", """University""", """of""", """Georgia""", """College""", """of""", """Agriculture;""", """Dr.""", """Stanley""", """Charm,""", """Tufts""", """University""", """School""", """of""", """Medicine;""", """Dr.""", """Robert""", """H.""", """Cotton,""", """ITT""", """Continental""", """Baking""", """Company;""", """Dr.""", """Owen""", """Fennema,""", """University""", """of""", """Wis-""", """consin;""", """Dr.""", """Robert""", """E.""", """Hardenburg,""", """USDA.""", """Questions""", """and""", """Answers""", """Exhibits""", """Open""", """Capt.""", """Jack""", """Stoney""", """Room""", """TRRF""", """Scientific""", """Advisory""", """Council""", """Meeting""", """Ballroom""", """Foyer"""]] # noqa: E231
        expected_words = lowerCAmelCase__  # bind the word list above to a readable name
lowerCAmelCase__ : Optional[Any] = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 
788, 562, 803], [576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
        expected_boxes = lowerCAmelCase__  # bind the box list above to a readable name
# fmt: on
        self.assertListEqual(encoding.words, expected_words)
        self.assertListEqual(encoding.boxes, expected_boxes)
        # with apply_OCR = False
        image_processing = LayoutLMv3ImageProcessor(apply_ocr=False)

        encoding = image_processing(image, return_tensors="pt")
        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
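# A minimal usage sketch (not part of the test suite above): runs the processor on a
# synthetic blank image. It assumes torch and Pillow are installed; apply_ocr=False
# skips Tesseract, so no OCR backend is required.
if __name__ == "__main__":
    processor = LayoutLMv3ImageProcessor(apply_ocr=False)
    blank = Image.new("RGB", (100, 100), color="white")
    print(processor(blank, return_tensors="pt").pixel_values.shape)  # torch.Size([1, 3, 224, 224])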
| 565
|
"""A fixed-capacity circular (ring-buffer) queue."""
class CircularQueue:
    """Circular FIFO queue backed by a fixed-size Python list."""

    def __init__(self, n: int) -> None:
        self.n = n
        self.array = [None] * self.n
        self.front = 0  # index of the first element
        self.rear = 0
        self.size = 0

    def __len__(self) -> int:
        return self.size

    def is_empty(self) -> bool:
        return self.size == 0

    def first(self):
        return False if self.is_empty() else self.array[self.front]

    def enqueue(self, data):
        if self.size >= self.n:
            raise Exception("QUEUE IS FULL")
        self.array[self.rear] = data
        self.rear = (self.rear + 1) % self.n
        self.size += 1
        return self

    def dequeue(self):
        if self.size == 0:
            raise Exception("UNDERFLOW")
        temp = self.array[self.front]
        self.array[self.front] = None
        self.front = (self.front + 1) % self.n
        self.size -= 1
        return temp
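# A minimal usage sketch (not part of the original file): exercises enqueue/dequeue
# and the wrap-around of the rear index on a capacity-3 queue.
if __name__ == "__main__":
    queue = CircularQueue(3)
    queue.enqueue(1).enqueue(2).enqueue(3)  # enqueue returns self, so calls chain
    print(queue.dequeue())  # 1
    queue.enqueue(4)  # rear wraps around into the freed slot
    print(len(queue))  # 3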
| 565
| 1
|
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import (
    DDIMScheduler,
    KandinskyV22ControlnetPipeline,
    KandinskyV22PriorPipeline,
    UNet2DConditionModel,
    VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyV22ControlnetPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22ControlnetPipeline
    params = ["image_embeds", "negative_image_embeds", "hint"]
    batch_params = ["image_embeds", "negative_image_embeds", "hint"]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_a(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100
    @property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 8,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image_hint",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNet2DConditionModel(**model_kwargs)
        return model
    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 32, 64, 64],
            "down_block_types": [
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "AttnDownEncoderBlock2D",
            ],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq

        scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_schedule="linear",
            beta_start=0.00085,
            beta_end=0.012,
            clip_sample=False,
            set_alpha_to_one=False,
            steps_offset=1,
            prediction_type="epsilon",
            thresholding=False,
        )

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )

        # create hint
        hint = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "hint": hint,
            "generator": generator,
            "height": 64,
            "width": 64,
            "guidance_scale": 4.0,
            "num_inference_steps": 2,
            "output_type": "np",
        }
        return inputs
    def test_kandinsky_controlnet(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.6959826, 0.868279, 0.7558092, 0.68769467, 0.85805804, 0.65977496, 0.44885302, 0.5959111, 0.4251595]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class KandinskyV22ControlnetPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_controlnet(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy"
        )

        hint = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/hint_image_cat.png"
        )
        hint = torch.from_numpy(np.array(hint)).float() / 255.0
        hint = hint.permute(2, 0, 1).unsqueeze(0)

        pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyV22ControlnetPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        prompt = "A robot, 4k photo"

        generator = torch.Generator(device="cuda").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=5,
            negative_prompt="",
        ).to_tuple()

        generator = torch.Generator(device="cuda").manual_seed(0)
        output = pipeline(
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            hint=hint,
            generator=generator,
            num_inference_steps=100,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (512, 512, 3)

        assert_mean_pixel_difference(image, expected_image)
| 130
|
import json
from typing import Iterator, List, Union
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers
from tokenizers.implementations.base_tokenizer import BaseTokenizer
from tokenizers.models import Unigram
from tokenizers.processors import TemplateProcessing
class SentencePieceUnigramTokenizer(BaseTokenizer):
    """SentencePiece-style Unigram tokenizer built on the `tokenizers` library, with
    NMT/NFKC normalization, whitespace collapsing and lower-casing."""

    def __init__(
        self,
        replacement: str = "▁",
        add_prefix_space: bool = True,
        unk_token: Union[str, AddedToken] = "<unk>",
        eos_token: Union[str, AddedToken] = "</s>",
        pad_token: Union[str, AddedToken] = "<pad>",
    ):
        self.special_tokens = {
            "pad": {"id": 0, "token": pad_token},
            "eos": {"id": 1, "token": eos_token},
            "unk": {"id": 2, "token": unk_token},
        }

        self.special_tokens_list = [None] * len(self.special_tokens)
        for token_dict in self.special_tokens.values():
            self.special_tokens_list[token_dict["id"]] = token_dict["token"]

        tokenizer = Tokenizer(Unigram())

        tokenizer.normalizer = normalizers.Sequence(
            [
                normalizers.Nmt(),
                normalizers.NFKC(),
                normalizers.Replace(Regex(" {2,}"), " "),
                normalizers.Lowercase(),
            ]
        )
        tokenizer.pre_tokenizer = pre_tokenizers.Sequence(
            [
                pre_tokenizers.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space),
                pre_tokenizers.Digits(individual_digits=True),
                pre_tokenizers.Punctuation(),
            ]
        )
        tokenizer.decoder = decoders.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space)

        tokenizer.post_processor = TemplateProcessing(
            single=f"$A {self.special_tokens['eos']['token']}",
            special_tokens=[(self.special_tokens["eos"]["token"], self.special_tokens["eos"]["id"])],
        )

        parameters = {
            "model": "SentencePieceUnigram",
            "replacement": replacement,
            "add_prefix_space": add_prefix_space,
        }

        super().__init__(tokenizer, parameters)

    def train(
        self,
        files: Union[str, List[str]],
        vocab_size: int = 8000,
        show_progress: bool = True,
    ):
        """Train the Unigram model on the given files."""
        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size,
            special_tokens=self.special_tokens_list,
            show_progress=show_progress,
        )

        if isinstance(files, str):
            files = [files]
        self._tokenizer.train(files, trainer=trainer)

        self.add_unk_id()

    def train_from_iterator(
        self,
        iterator: Union[Iterator[str], Iterator[Iterator[str]]],
        vocab_size: int = 8000,
        show_progress: bool = True,
    ):
        """Train the Unigram model on the given iterator of texts."""
        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size,
            special_tokens=self.special_tokens_list,
            show_progress=show_progress,
        )

        self._tokenizer.train_from_iterator(iterator, trainer=trainer)

        self.add_unk_id()

    def add_unk_id(self):
        # the Unigram model's unk_id can only be set by round-tripping through JSON
        tokenizer_json = json.loads(self._tokenizer.to_str())
        tokenizer_json["model"]["unk_id"] = self.special_tokens["unk"]["id"]
        self._tokenizer = Tokenizer.from_str(json.dumps(tokenizer_json))
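# A minimal usage sketch (not part of the original file; the toy corpus is made up):
# train the Unigram tokenizer defined above on a few lines of text, then encode one.
if __name__ == "__main__":
    tokenizer = SentencePieceUnigramTokenizer()
    corpus = ["hello world", "hello tokenizers", "unigram models pick frequent pieces"]
    tokenizer.train_from_iterator(corpus, vocab_size=100, show_progress=False)
    print(tokenizer.encode("hello unigram").tokens)  # learned subword pieces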
| 130
| 1
|
from multiprocessing import Lock, Pipe, Process

# lock used to ensure that two processes do not access a pipe at the same time
process_lock = Lock()


def oe_process(position, value, l_send, r_send, lr_cv, rr_cv, result_pipe):
    """Worker for one list element of the parallel odd-even transposition sort."""
    global process_lock

    # we perform n swaps since after n swaps we know we are sorted
    # we *could* stop early if we are sorted already, but it takes as long to
    # find out we are sorted as it does to sort the list with this algorithm
    for i in range(0, 10):
        if (i + position) % 2 == 0 and r_send is not None:
            # send your value to your right neighbor
            process_lock.acquire()
            r_send[1].send(value)
            process_lock.release()

            # receive your right neighbor's value
            process_lock.acquire()
            temp = rr_cv[0].recv()
            process_lock.release()

            # take the lower value since you are on the left
            value = min(value, temp)
        elif (i + position) % 2 != 0 and l_send is not None:
            # send your value to your left neighbor
            process_lock.acquire()
            l_send[1].send(value)
            process_lock.release()

            # receive your left neighbor's value
            process_lock.acquire()
            temp = lr_cv[0].recv()
            process_lock.release()

            # take the higher value since you are on the right
            value = max(value, temp)
    # after all swaps are performed, send the values back to main
    result_pipe[1].send(value)
def odd_even_transposition(arr):
    """Sort `arr` by spawning one process per element; neighbors swap via pipes."""
    process_array_ = []
    result_pipe = []
    # initialize the list of pipes where the values will be retrieved
    for _ in arr:
        result_pipe.append(Pipe())
    # creates the processes
    # the first and last process only have one neighbor so they are made outside
    # of the loop
    temp_rs = Pipe()
    temp_rr = Pipe()
    process_array_.append(
        Process(
            target=oe_process,
            args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]),
        )
    )
    temp_lr = temp_rs
    temp_ls = temp_rr

    for i in range(1, len(arr) - 1):
        temp_rs = Pipe()
        temp_rr = Pipe()
        process_array_.append(
            Process(
                target=oe_process,
                args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]),
            )
        )
        temp_lr = temp_rs
        temp_ls = temp_rr

    process_array_.append(
        Process(
            target=oe_process,
            args=(
                len(arr) - 1,
                arr[len(arr) - 1],
                temp_ls,
                None,
                temp_lr,
                None,
                result_pipe[len(arr) - 1],
            ),
        )
    )
    # start the processes
    for p in process_array_:
        p.start()
    # wait for the processes to end and write their values to the list
    for p in range(0, len(result_pipe)):
        arr[p] = result_pipe[p][0].recv()
        process_array_[p].join()
    return arr
def main():
    arr = list(range(10, 0, -1))
    print("Initial List")
    print(*arr)
    arr = odd_even_transposition(arr)
    print("Sorted List\n")
    print(*arr)


if __name__ == "__main__":
    main()
| 9
|
"""An AVL tree (self-balancing binary search tree) with insertion, deletion and
level-order printing."""
from __future__ import annotations
import math
import random
from typing import Any
class MyQueue:
    def __init__(self) -> None:
        self.data: list[Any] = []
        self.head: int = 0
        self.tail: int = 0

    def is_empty(self) -> bool:
        return self.head == self.tail

    def push(self, data: Any) -> None:
        self.data.append(data)
        self.tail = self.tail + 1

    def pop(self) -> Any:
        ret = self.data[self.head]
        self.head = self.head + 1
        return ret

    def count(self) -> int:
        return self.tail - self.head

    def print_queue(self) -> None:
        print(self.data)
        print("**************")
        print(self.data[self.head : self.tail])
class MyNode:
    def __init__(self, data: Any) -> None:
        self.data = data
        self.left: MyNode | None = None
        self.right: MyNode | None = None
        self.height: int = 1

    def get_data(self) -> Any:
        return self.data

    def get_left(self) -> MyNode | None:
        return self.left

    def get_right(self) -> MyNode | None:
        return self.right

    def get_height(self) -> int:
        return self.height

    def set_data(self, data: Any) -> None:
        self.data = data

    def set_left(self, node: MyNode | None) -> None:
        self.left = node

    def set_right(self, node: MyNode | None) -> None:
        self.right = node

    def set_height(self, height: int) -> None:
        self.height = height
def get_height(node: MyNode | None) -> int:
    if node is None:
        return 0
    return node.get_height()


def my_max(a: int, b: int) -> int:
    if a > b:
        return a
    return b
def right_rotation(node: MyNode) -> MyNode:
    """Rotate the subtree rooted at `node` to the right (fixes a left-left imbalance)."""
    print("right rotation node:", node.get_data())
    ret = node.get_left()
    assert ret is not None
    node.set_left(ret.get_right())
    ret.set_right(node)
    node.set_height(my_max(get_height(node.get_right()), get_height(node.get_left())) + 1)
    ret.set_height(my_max(get_height(ret.get_right()), get_height(ret.get_left())) + 1)
    return ret


def left_rotation(node: MyNode) -> MyNode:
    """Rotate the subtree rooted at `node` to the left (fixes a right-right imbalance)."""
    print("left rotation node:", node.get_data())
    ret = node.get_right()
    assert ret is not None
    node.set_right(ret.get_left())
    ret.set_left(node)
    node.set_height(my_max(get_height(node.get_right()), get_height(node.get_left())) + 1)
    ret.set_height(my_max(get_height(ret.get_right()), get_height(ret.get_left())) + 1)
    return ret


def lr_rotation(node: MyNode) -> MyNode:
    """Left-right double rotation (the left child is right-heavy)."""
    left_child = node.get_left()
    assert left_child is not None
    node.set_left(left_rotation(left_child))
    return right_rotation(node)


def rl_rotation(node: MyNode) -> MyNode:
    """Right-left double rotation (the right child is left-heavy)."""
    right_child = node.get_right()
    assert right_child is not None
    node.set_right(right_rotation(right_child))
    return left_rotation(node)
def insert_node(node: MyNode | None, data: Any) -> MyNode | None:
    if node is None:
        return MyNode(data)
    if data < node.get_data():
        node.set_left(insert_node(node.get_left(), data))
        if (
            get_height(node.get_left()) - get_height(node.get_right()) == 2
        ):  # an unbalance detected
            left_child = node.get_left()
            assert left_child is not None
            if (
                data < left_child.get_data()
            ):  # new node is the left child of the left child
                node = right_rotation(node)
            else:
                node = lr_rotation(node)
    else:
        node.set_right(insert_node(node.get_right(), data))
        if get_height(node.get_right()) - get_height(node.get_left()) == 2:
            right_child = node.get_right()
            assert right_child is not None
            if data < right_child.get_data():
                node = rl_rotation(node)
            else:
                node = left_rotation(node)
    node.set_height(my_max(get_height(node.get_right()), get_height(node.get_left())) + 1)
    return node
def get_right_most(root: MyNode) -> Any:
    while True:
        right_child = root.get_right()
        if right_child is None:
            break
        root = right_child
    return root.get_data()


def get_left_most(root: MyNode) -> Any:
    while True:
        left_child = root.get_left()
        if left_child is None:
            break
        root = left_child
    return root.get_data()
def del_node(root: MyNode, data: Any) -> MyNode | None:
    left_child = root.get_left()
    right_child = root.get_right()
    if root.get_data() == data:
        if left_child is not None and right_child is not None:
            # replace with the in-order successor, then delete it from the right subtree
            temp_data = get_left_most(right_child)
            root.set_data(temp_data)
            root.set_right(del_node(right_child, temp_data))
        elif left_child is not None:
            root = left_child
        elif right_child is not None:
            root = right_child
        else:
            return None
    elif root.get_data() > data:
        if left_child is None:
            print("No such data")
            return root
        else:
            root.set_left(del_node(left_child, data))
    else:  # root.get_data() < data
        if right_child is None:
            return root
        else:
            root.set_right(del_node(right_child, data))
    if get_height(right_child) - get_height(left_child) == 2:
        assert right_child is not None
        if get_height(right_child.get_right()) > get_height(right_child.get_left()):
            root = left_rotation(root)
        else:
            root = rl_rotation(root)
    elif get_height(right_child) - get_height(left_child) == -2:
        assert left_child is not None
        if get_height(left_child.get_left()) > get_height(left_child.get_right()):
            root = right_rotation(root)
        else:
            root = lr_rotation(root)
    root.set_height(my_max(get_height(root.get_right()), get_height(root.get_left())) + 1)
    return root
class AVLtree:
    """An AVL tree that keeps itself balanced on insertion and deletion."""

    def __init__(self) -> None:
        self.root: MyNode | None = None

    def get_height(self) -> int:
        return get_height(self.root)

    def insert(self, data: Any) -> None:
        print("insert:" + str(data))
        self.root = insert_node(self.root, data)

    def del_node(self, data: Any) -> None:
        print("delete:" + str(data))
        if self.root is None:
            print("Tree is empty!")
            return
        self.root = del_node(self.root, data)

    def __str__(self) -> str:  # a level traversal, gives a more intuitive look on the tree
        output = ""
        q = MyQueue()
        q.push(self.root)
        layer = self.get_height()
        if layer == 0:
            return output
        cnt = 0
        while not q.is_empty():
            node = q.pop()
            space = " " * int(math.pow(2, layer - 1))
            output += space
            if node is None:
                output += "*"
                q.push(None)
                q.push(None)
            else:
                output += str(node.get_data())
                q.push(node.get_left())
                q.push(node.get_right())
            output += space
            cnt = cnt + 1
            for i in range(100):
                if cnt == math.pow(2, i) - 1:
                    layer = layer - 1
                    if layer == 0:
                        output += "\n*************************************"
                        return output
                    output += "\n"
                    break
        output += "\n*************************************"
        return output
def _test() -> None:
    import doctest

    doctest.testmod()
if __name__ == "__main__":
    _test()
    t = AVLtree()
    lst = list(range(10))
    random.shuffle(lst)
    for i in lst:
        t.insert(i)
        print(str(t))
    random.shuffle(lst)
    for i in lst:
        t.del_node(i)
        print(str(t))
| 209
| 0
|
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_time_series_transformer": [
        "TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "TimeSeriesTransformerConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_time_series_transformer"] = [
        "TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TimeSeriesTransformerForPrediction",
        "TimeSeriesTransformerModel",
        "TimeSeriesTransformerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_time_series_transformer import (
        TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        TimeSeriesTransformerConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_time_series_transformer import (
            TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TimeSeriesTransformerForPrediction,
            TimeSeriesTransformerModel,
            TimeSeriesTransformerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
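# A minimal usage sketch (hypothetical; not part of the original module): with the
# _LazyModule wiring above, importing the subpackage stays cheap and the torch-backed
# classes are only materialized on first attribute access, e.g.:
#
#     from transformers.models.time_series_transformer import TimeSeriesTransformerConfig
#     config = TimeSeriesTransformerConfig(prediction_length=24)  # prediction_length is a real config arg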
| 5
|
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
    import torch

    from transformers import (
        MODEL_FOR_PRETRAINING_MAPPING,
        AlbertForMaskedLM,
        AlbertForMultipleChoice,
        AlbertForPreTraining,
        AlbertForQuestionAnswering,
        AlbertForSequenceClassification,
        AlbertForTokenClassification,
        AlbertModel,
    )
    from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class AlbertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        embedding_size=16,
        hidden_size=36,
        num_hidden_layers=6,
        num_hidden_groups=6,
        num_attention_heads=6,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return AlbertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            num_hidden_groups=self.num_hidden_groups,
        )
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
    def create_and_check_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            sentence_order_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.sop_logits.shape, (self.batch_size, config.num_labels))
    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = AlbertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = AlbertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = AlbertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class AlbertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            AlbertModel,
            AlbertForPreTraining,
            AlbertForMaskedLM,
            AlbertForMultipleChoice,
            AlbertForSequenceClassification,
            AlbertForTokenClassification,
            AlbertForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": AlbertModel,
            "fill-mask": AlbertForMaskedLM,
            "question-answering": AlbertForQuestionAnswering,
            "text-classification": AlbertForSequenceClassification,
            "token-classification": AlbertForTokenClassification,
            "zero-shot": AlbertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True

    # special case for ForPreTraining model, which needs the extra labels
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["sentence_order_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = AlbertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=AlbertConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = AlbertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class AlbertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = AlbertModel.from_pretrained("albert-base-v2")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
| 5
| 1
|
import subprocess
import sys
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch
class OfflineTests(TestCasePlus):
    @require_torch
    def test_offline_mode(self):
        # this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
        # `transformers` is loaded, and it's too late for inside pytest - so we are changing it
        # while running an external program

        # python one-liner segments

        # this must be loaded before socket.socket is monkey-patched
        load = """
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
"""

        run = """
mname = "hf-internal-testing/tiny-random-bert"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
BertTokenizer.from_pretrained(mname)
pipe = pipeline(task="fill-mask", model=mname)
print("success")
"""

        mock = """
import socket
def offline_socket(*args, **kwargs): raise RuntimeError("Offline mode is enabled, we shouldn't access internet")
socket.socket = offline_socket
"""

        # Force fetching the files so that we can use the cache
        mname = "hf-internal-testing/tiny-random-bert"
        BertConfig.from_pretrained(mname)
        BertModel.from_pretrained(mname)
        BertTokenizer.from_pretrained(mname)
        pipeline(task="fill-mask", model=mname)

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run, mock])]

        # should succeed
        env = self.get_env()
        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env["TRANSFORMERS_OFFLINE"] = "1"
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())
    @require_torch
    def test_offline_mode_no_internet(self):
        # python one-liner segments

        # this must be loaded before socket.socket is monkey-patched
        load = """
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
"""

        run = """
mname = "hf-internal-testing/tiny-random-bert"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
BertTokenizer.from_pretrained(mname)
pipe = pipeline(task="fill-mask", model=mname)
print("success")
"""

        mock = """
import socket
def offline_socket(*args, **kwargs): raise socket.error("Faking flaky internet")
socket.socket = offline_socket
"""

        # Force fetching the files so that we can use the cache
        mname = "hf-internal-testing/tiny-random-bert"
        BertConfig.from_pretrained(mname)
        BertModel.from_pretrained(mname)
        BertTokenizer.from_pretrained(mname)
        pipeline(task="fill-mask", model=mname)

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run, mock])]

        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())
    @require_torch
    def test_offline_mode_sharded_checkpoint(self):
        # this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
        # `transformers` is loaded, and it's too late for inside pytest - so we are changing it
        # while running an external program

        # python one-liner segments

        # this must be loaded before socket.socket is monkey-patched
        load = """
from transformers import BertConfig, BertModel, BertTokenizer
"""

        run = """
mname = "hf-internal-testing/tiny-random-bert-sharded"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
print("success")
"""

        mock = """
import socket
def offline_socket(*args, **kwargs): raise ValueError("Offline mode is enabled")
socket.socket = offline_socket
"""

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run])]

        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())

        # next emulate no network
        cmd = [sys.executable, "-c", "\n".join([load, mock, run])]

        # Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
        # env["TRANSFORMERS_OFFLINE"] = "0"
        # result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        # self.assertEqual(result.returncode, 1, result.stderr)

        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env["TRANSFORMERS_OFFLINE"] = "1"
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())
    @require_torch
    def test_offline_mode_pipeline_exception(self):
        load = """
from transformers import pipeline
"""
        run = """
mname = "hf-internal-testing/tiny-random-bert"
pipe = pipeline(model=mname)
"""

        mock = """
import socket
def offline_socket(*args, **kwargs): raise socket.error("Offline mode is enabled")
socket.socket = offline_socket
"""
        env = self.get_env()
        env["TRANSFORMERS_OFFLINE"] = "1"
        cmd = [sys.executable, "-c", "\n".join([load, mock, run])]
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 1, result.stderr)
        self.assertIn(
            "You cannot infer task automatically within `pipeline` when using offline mode",
            result.stderr.decode().replace("\n", ""),
        )
    @require_torch
    def test_offline_model_dynamic_model(self):
        load = """
from transformers import AutoModel
"""
        run = """
mname = "hf-internal-testing/test_dynamic_model"
AutoModel.from_pretrained(mname, trust_remote_code=True)
print("success")
"""

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run])]

        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())

        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env["TRANSFORMERS_OFFLINE"] = "1"
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())
| 693
|
"""Audio feature type: encodes audio files/bytes into Arrow storage and decodes them
back into numpy arrays."""
import os
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen, xsplitext
from ..table import array_cast
from ..utils.py_utils import no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
from .features import FeatureType
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase = False, False, False
@dataclass
class Audio:
    sampling_rate: Optional[int] = None
    mono: bool = True
    decode: bool = True
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()})
    _type: str = field(default="Audio", init=False, repr=False)

    def __call__(self):
        return self.pa_type
def __SCREAMING_SNAKE_CASE ( self : Dict , __lowerCamelCase : Union[str, bytes, dict] ):
try:
import soundfile as sf # soundfile is a dependency of librosa, needed to decode audio files.
except ImportError as err:
raise ImportError('''To support encoding audio data, please install \'soundfile\'.''' ) from err
if isinstance(__lowerCamelCase , __lowerCamelCase ):
return {"bytes": None, "path": value}
elif isinstance(__lowerCamelCase , __lowerCamelCase ):
return {"bytes": value, "path": None}
elif "array" in value:
# convert the audio array to wav bytes
UpperCAmelCase__ :int = BytesIO()
sf.write(__lowerCamelCase , value['''array'''] , value['''sampling_rate'''] , format='''wav''' )
return {"bytes": buffer.getvalue(), "path": None}
elif value.get('''path''' ) is not None and os.path.isfile(value['''path'''] ):
# we set "bytes": None to not duplicate the data if they're already available locally
if value["path"].endswith('''pcm''' ):
# "PCM" only has raw audio bytes
if value.get('''sampling_rate''' ) is None:
# Converting raw PCM bytes to WAV bytes requires knowing the sampling rate
raise KeyError('''To use PCM files, please specify a \'sampling_rate\' in Audio object''' )
if value.get('''bytes''' ):
# If the PCM bytes are already available, use them directly instead of re-reading the file
UpperCAmelCase__ :List[Any] = np.frombuffer(value['''bytes'''] , dtype=np.intaa ).astype(np.floataa ) / 3_2_7_6_7
else:
UpperCAmelCase__ :Optional[Any] = np.memmap(value['''path'''] , dtype='''h''' , mode='''r''' ).astype(np.floataa ) / 3_2_7_6_7
UpperCAmelCase__ :Optional[Any] = BytesIO(bytes() )
sf.write(__lowerCamelCase , __lowerCamelCase , value['''sampling_rate'''] , format='''wav''' )
return {"bytes": buffer.getvalue(), "path": None}
else:
return {"bytes": None, "path": value.get('''path''' )}
elif value.get('''bytes''' ) is not None or value.get('''path''' ) is not None:
# store the audio bytes, and path is used to infer the audio format using the file extension
return {"bytes": value.get('''bytes''' ), "path": value.get('''path''' )}
else:
raise ValueError(
f'''An audio sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.''' )
def __SCREAMING_SNAKE_CASE ( self : List[Any] , __lowerCamelCase : dict , __lowerCamelCase : Optional[Dict[str, Union[str, bool, None]]] = None ):
if not self.decode:
raise RuntimeError('''Decoding is disabled for this feature. Please use Audio(decode=True) instead.''' )
UpperCAmelCase__ , UpperCAmelCase__ :str = (value['''path'''], BytesIO(value['''bytes'''] )) if value['''bytes'''] is not None else (value['''path'''], None)
if path is None and file is None:
raise ValueError(f'''An audio sample should have one of \'path\' or \'bytes\' but both are None in {value}.''' )
try:
import librosa
import soundfile as sf
except ImportError as err:
raise ImportError('''To support decoding audio files, please install \'librosa\' and \'soundfile\'.''' ) from err
UpperCAmelCase__ :List[str] = xsplitext(__lowerCamelCase )[1][1:].lower() if path is not None else None
if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
raise RuntimeError(
'''Decoding \'opus\' files requires the system library \'libsndfile\'>=1.0.31. '''
'''You can try to update the `soundfile` python library: `pip install "soundfile>=0.12.1"`.''' )
elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
raise RuntimeError(
'''Decoding \'mp3\' files requires the system library \'libsndfile\'>=1.1.0. '''
'''You can try to update the `soundfile` python library: `pip install "soundfile>=0.12.1"`.''' )
if file is None:
UpperCAmelCase__ :Optional[Any] = token_per_repo_id or {}
UpperCAmelCase__ :str = path.split('''::''' )[-1]
try:
UpperCAmelCase__ :Tuple = string_to_dict(__lowerCamelCase , config.HUB_DATASETS_URL )['''repo_id''']
UpperCAmelCase__ :str = token_per_repo_id[repo_id]
except (ValueError, KeyError):
UpperCAmelCase__ :Tuple = None
with xopen(__lowerCamelCase , '''rb''' , use_auth_token=__lowerCamelCase ) as f:
UpperCAmelCase__ , UpperCAmelCase__ :Union[str, Any] = sf.read(__lowerCamelCase )
else:
UpperCAmelCase__ , UpperCAmelCase__ :List[Any] = sf.read(__lowerCamelCase )
UpperCAmelCase__ :Optional[int] = array.T
if self.mono:
UpperCAmelCase__ :Any = librosa.to_mono(__lowerCamelCase )
if self.sampling_rate and self.sampling_rate != sampling_rate:
UpperCAmelCase__ :Union[str, Any] = librosa.resample(__lowerCamelCase , orig_sr=__lowerCamelCase , target_sr=self.sampling_rate )
UpperCAmelCase__ :List[str] = self.sampling_rate
return {"path": path, "array": array, "sampling_rate": sampling_rate}
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
from .features import Value
if self.decode:
raise ValueError('''Cannot flatten a decoded Audio feature.''' )
return {
"bytes": Value('''binary''' ),
"path": Value('''string''' ),
}
def __SCREAMING_SNAKE_CASE ( self : Any , __lowerCamelCase : Union[pa.StringArray, pa.StructArray] ):
if pa.types.is_string(storage.type ):
UpperCAmelCase__ :List[str] = pa.array([None] * len(__lowerCamelCase ) , type=pa.binary() )
UpperCAmelCase__ :Tuple = pa.StructArray.from_arrays([bytes_array, storage] , ['''bytes''', '''path'''] , mask=storage.is_null() )
elif pa.types.is_binary(storage.type ):
UpperCAmelCase__ :str = pa.array([None] * len(__lowerCamelCase ) , type=pa.string() )
UpperCAmelCase__ :int = pa.StructArray.from_arrays([storage, path_array] , ['''bytes''', '''path'''] , mask=storage.is_null() )
elif pa.types.is_struct(storage.type ) and storage.type.get_all_field_indices('''array''' ):
UpperCAmelCase__ :Any = pa.array([Audio().encode_example(__lowerCamelCase ) if x is not None else None for x in storage.to_pylist()] )
elif pa.types.is_struct(storage.type ):
if storage.type.get_field_index('''bytes''' ) >= 0:
UpperCAmelCase__ :str = storage.field('''bytes''' )
else:
UpperCAmelCase__ :List[str] = pa.array([None] * len(__lowerCamelCase ) , type=pa.binary() )
if storage.type.get_field_index('''path''' ) >= 0:
UpperCAmelCase__ :Optional[int] = storage.field('''path''' )
else:
UpperCAmelCase__ :Optional[int] = pa.array([None] * len(__lowerCamelCase ) , type=pa.string() )
UpperCAmelCase__ :List[str] = pa.StructArray.from_arrays([bytes_array, path_array] , ['''bytes''', '''path'''] , mask=storage.is_null() )
return array_cast(__lowerCamelCase , self.pa_type )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] , __lowerCamelCase : pa.StructArray ):
@no_op_if_value_is_null
def path_to_bytes(__lowerCamelCase : Dict ):
with xopen(__lowerCamelCase , '''rb''' ) as f:
UpperCAmelCase__ :Any = f.read()
return bytes_
UpperCAmelCase__ :Union[str, Any] = pa.array(
[
(path_to_bytes(x['''path'''] ) if x['''bytes'''] is None else x['''bytes''']) if x is not None else None
for x in storage.to_pylist()
] , type=pa.binary() , )
UpperCAmelCase__ :Optional[int] = pa.array(
[os.path.basename(__lowerCamelCase ) if path is not None else None for path in storage.field('''path''' ).to_pylist()] , type=pa.string() , )
UpperCAmelCase__ :Optional[int] = pa.StructArray.from_arrays([bytes_array, path_array] , ['''bytes''', '''path'''] , mask=bytes_array.is_null() )
return array_cast(__lowerCamelCase , self.pa_type )
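# A short usage sketch for the Audio feature implemented above, assuming the
# `datasets` library is installed and "sample.wav" exists locally.
from datasets import Audio, Dataset

ds = Dataset.from_dict({"audio": ["sample.wav"]})
# Casting the column makes each access run decode_example(), returning
# {"path", "array", "sampling_rate"} resampled to 16 kHz.
ds = ds.cast_column("audio", Audio(sampling_rate=16_000))
sample = ds[0]["audio"]
print(sample["sampling_rate"], sample["array"].shape)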
| 467
| 0
|
from collections import UserDict
from typing import Union
import numpy as np
import requests
from ..utils import (
add_end_docstrings,
logging,
)
from .audio_classification import ffmpeg_read
from .base import PIPELINE_INIT_ARGS, Pipeline
_lowerCamelCase : int = logging.get_logger(__name__)
@add_end_docstrings(__lowerCamelCase )
class lowerCamelCase (__lowerCamelCase ):
"""simple docstring"""
def __init__( self : str, **_UpperCAmelCase : Any ) -> Tuple:
"""simple docstring"""
super().__init__(**_UpperCAmelCase )
if self.framework != "pt":
raise ValueError(F'''The {self.__class__} is only available in PyTorch.''' )
# No specific FOR_XXX available yet
def __call__( self : str, _UpperCAmelCase : Union[np.ndarray, bytes, str], **_UpperCAmelCase : Union[str, Any] ) -> Tuple:
"""simple docstring"""
return super().__call__(_UpperCAmelCase, **_UpperCAmelCase )
def A_ ( self : Union[str, Any], **_UpperCAmelCase : Union[str, Any] ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = {}
if "candidate_labels" in kwargs:
SCREAMING_SNAKE_CASE__ : int = kwargs["candidate_labels"]
if "hypothesis_template" in kwargs:
SCREAMING_SNAKE_CASE__ : Optional[int] = kwargs["hypothesis_template"]
return preprocess_params, {}, {}
def A_ ( self : Optional[Any], _UpperCAmelCase : str, _UpperCAmelCase : int=None, _UpperCAmelCase : Optional[int]="This is a sound of {}." ) -> Any:
"""simple docstring"""
if isinstance(_UpperCAmelCase, _UpperCAmelCase ):
if audio.startswith("http://" ) or audio.startswith("https://" ):
# We need to actually check for a real protocol, otherwise it's impossible to use a local file
# like http_huggingface_co.png
SCREAMING_SNAKE_CASE__ : Union[str, Any] = requests.get(_UpperCAmelCase ).content
else:
with open(_UpperCAmelCase, "rb" ) as f:
SCREAMING_SNAKE_CASE__ : int = f.read()
if isinstance(_UpperCAmelCase, _UpperCAmelCase ):
SCREAMING_SNAKE_CASE__ : Dict = ffmpeg_read(_UpperCAmelCase, self.feature_extractor.sampling_rate )
if not isinstance(_UpperCAmelCase, np.ndarray ):
raise ValueError("We expect a numpy ndarray as input" )
if len(audio.shape ) != 1:
raise ValueError("We expect a single channel audio input for ZeroShotAudioClassificationPipeline" )
SCREAMING_SNAKE_CASE__ : List[str] = self.feature_extractor(
[audio], sampling_rate=self.feature_extractor.sampling_rate, return_tensors="pt" )
SCREAMING_SNAKE_CASE__ : Dict = candidate_labels
SCREAMING_SNAKE_CASE__ : List[str] = [hypothesis_template.format(x ) for x in candidate_labels]
SCREAMING_SNAKE_CASE__ : str = self.tokenizer(_UpperCAmelCase, return_tensors=self.framework, padding=_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : str = [text_inputs]
return inputs
def A_ ( self : Any, _UpperCAmelCase : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = model_inputs.pop("candidate_labels" )
SCREAMING_SNAKE_CASE__ : Optional[int] = model_inputs.pop("text_inputs" )
if isinstance(text_inputs[0], _UpperCAmelCase ):
SCREAMING_SNAKE_CASE__ : Any = text_inputs[0]
else:
# Batching case.
SCREAMING_SNAKE_CASE__ : int = text_inputs[0][0]
SCREAMING_SNAKE_CASE__ : Any = self.model(**_UpperCAmelCase, **_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = {
"candidate_labels": candidate_labels,
"logits": outputs.logits_per_audio,
}
return model_outputs
def A_ ( self : Optional[int], _UpperCAmelCase : Optional[int] ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = model_outputs.pop("candidate_labels" )
SCREAMING_SNAKE_CASE__ : int = model_outputs["logits"][0]
if self.framework == "pt":
SCREAMING_SNAKE_CASE__ : Tuple = logits.softmax(dim=0 )
SCREAMING_SNAKE_CASE__ : List[str] = probs.tolist()
else:
raise ValueError("`tf` framework not supported." )
SCREAMING_SNAKE_CASE__ : Optional[int] = [
{"score": score, "label": candidate_label}
for score, candidate_label in sorted(zip(_UpperCAmelCase, _UpperCAmelCase ), key=lambda x : -x[0] )
]
return result
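# A hedged usage sketch for the pipeline above. "laion/clap-htsat-unfused" is
# a real CLAP checkpoint commonly used for this task; "dog_bark.wav" is an
# illustrative local file.
from transformers import pipeline

classifier = pipeline(
    task="zero-shot-audio-classification", model="laion/clap-htsat-unfused"
)
preds = classifier(
    "dog_bark.wav",
    candidate_labels=["a dog barking", "a car engine"],
    hypothesis_template="This is a sound of {}.",
)
print(preds)  # list of {"score": ..., "label": ...} sorted by descending score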
| 157
|
import re
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class lowerCamelCase (__lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase_ = ["image_processor", "tokenizer"]
UpperCAmelCase_ = "AutoImageProcessor"
UpperCAmelCase_ = "AutoTokenizer"
def __init__( self : List[Any], _UpperCAmelCase : Any=None, _UpperCAmelCase : int=None, **_UpperCAmelCase : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead.", _UpperCAmelCase, )
SCREAMING_SNAKE_CASE__ : int = kwargs.pop("feature_extractor" )
SCREAMING_SNAKE_CASE__ : Optional[Any] = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
super().__init__(_UpperCAmelCase, _UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.image_processor
SCREAMING_SNAKE_CASE__ : List[Any] = False
def __call__( self : Optional[Any], *_UpperCAmelCase : Any, **_UpperCAmelCase : str ) -> Optional[int]:
"""simple docstring"""
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*_UpperCAmelCase, **_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Optional[int] = kwargs.pop("images", _UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : str = kwargs.pop("text", _UpperCAmelCase )
if len(_UpperCAmelCase ) > 0:
SCREAMING_SNAKE_CASE__ : str = args[0]
SCREAMING_SNAKE_CASE__ : Optional[Any] = args[1:]
if images is None and text is None:
raise ValueError("You need to specify either an `images` or `text` input to process." )
if images is not None:
SCREAMING_SNAKE_CASE__ : Any = self.image_processor(_UpperCAmelCase, *_UpperCAmelCase, **_UpperCAmelCase )
if text is not None:
SCREAMING_SNAKE_CASE__ : int = self.tokenizer(_UpperCAmelCase, **_UpperCAmelCase )
if text is None:
return inputs
elif images is None:
return encodings
else:
SCREAMING_SNAKE_CASE__ : int = encodings["input_ids"]
return inputs
def A_ ( self : Tuple, *_UpperCAmelCase : List[Any], **_UpperCAmelCase : Dict ) -> Any:
"""simple docstring"""
return self.tokenizer.batch_decode(*_UpperCAmelCase, **_UpperCAmelCase )
def A_ ( self : List[Any], *_UpperCAmelCase : int, **_UpperCAmelCase : Tuple ) -> Dict:
"""simple docstring"""
return self.tokenizer.decode(*_UpperCAmelCase, **_UpperCAmelCase )
@contextmanager
def A_ ( self : Tuple ) -> List[str]:
"""simple docstring"""
warnings.warn(
"`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
"labels by using the argument `text` of the regular `__call__` method (either in the same call as "
"your images inputs, or in a separate call." )
SCREAMING_SNAKE_CASE__ : Optional[int] = True
SCREAMING_SNAKE_CASE__ : Dict = self.tokenizer
yield
SCREAMING_SNAKE_CASE__ : List[str] = self.image_processor
SCREAMING_SNAKE_CASE__ : List[Any] = False
def A_ ( self : str, _UpperCAmelCase : Tuple, _UpperCAmelCase : Any=False, _UpperCAmelCase : Union[str, Any]=None ) -> Any:
"""simple docstring"""
if added_vocab is None:
SCREAMING_SNAKE_CASE__ : int = self.tokenizer.get_added_vocab()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = {}
while tokens:
SCREAMING_SNAKE_CASE__ : Dict = re.search(r"<s_(.*?)>", _UpperCAmelCase, re.IGNORECASE )
if start_token is None:
break
SCREAMING_SNAKE_CASE__ : List[Any] = start_token.group(1 )
SCREAMING_SNAKE_CASE__ : str = re.search(rF'''</s_{key}>''', _UpperCAmelCase, re.IGNORECASE )
SCREAMING_SNAKE_CASE__ : List[str] = start_token.group()
if end_token is None:
SCREAMING_SNAKE_CASE__ : Optional[int] = tokens.replace(_UpperCAmelCase, "" )
else:
SCREAMING_SNAKE_CASE__ : int = end_token.group()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = re.escape(_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : List[str] = re.escape(_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : List[Any] = re.search(F'''{start_token_escaped}(.*?){end_token_escaped}''', _UpperCAmelCase, re.IGNORECASE )
if content is not None:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = content.group(1 ).strip()
if r"<s_" in content and r"</s_" in content: # non-leaf node
SCREAMING_SNAKE_CASE__ : Optional[int] = self.tokenajson(_UpperCAmelCase, is_inner_value=_UpperCAmelCase, added_vocab=_UpperCAmelCase )
if value:
if len(_UpperCAmelCase ) == 1:
SCREAMING_SNAKE_CASE__ : List[str] = value[0]
SCREAMING_SNAKE_CASE__ : Optional[int] = value
else: # leaf nodes
SCREAMING_SNAKE_CASE__ : Tuple = []
for leaf in content.split(r"<sep/>" ):
SCREAMING_SNAKE_CASE__ : str = leaf.strip()
if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
SCREAMING_SNAKE_CASE__ : List[Any] = leaf[1:-2] # for categorical special tokens
output[key].append(_UpperCAmelCase )
if len(output[key] ) == 1:
SCREAMING_SNAKE_CASE__ : int = output[key][0]
SCREAMING_SNAKE_CASE__ : int = tokens[tokens.find(_UpperCAmelCase ) + len(_UpperCAmelCase ) :].strip()
if tokens[:6] == r"<sep/>": # non-leaf nodes
return [output] + self.tokenajson(tokens[6:], is_inner_value=_UpperCAmelCase, added_vocab=_UpperCAmelCase )
if len(_UpperCAmelCase ):
return [output] if is_inner_value else output
else:
return [] if is_inner_value else {"text_sequence": tokens}
@property
def A_ ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
warnings.warn(
"`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.", _UpperCAmelCase, )
return self.image_processor_class
@property
def A_ ( self : Any ) -> int:
"""simple docstring"""
warnings.warn(
"`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.", _UpperCAmelCase, )
return self.image_processor
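# An illustrative sketch of the token-to-JSON parsing implemented above; the
# method corresponds to DonutProcessor.token2json in transformers. The
# checkpoint is the real Donut base model, but the token string is made up.
from transformers import DonutProcessor

processor = DonutProcessor.from_pretrained("naver-clova-ix/donut-base")
tokens = "<s_menu><s_name>latte</s_name><s_price>4.50</s_price></s_menu>"
print(processor.token2json(tokens))
# expected: {"menu": {"name": "latte", "price": "4.50"}}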
| 157
| 1
|
def check_bipartite_dfs(graph):
    """Check whether an undirected graph (adjacency list) is bipartite via DFS 2-coloring."""
    visited = [False] * len(graph)
    color = [-1] * len(graph)

    def dfs(v, c):
        visited[v] = True
        color[v] = c
        for u in graph[v]:
            if not visited[u]:
                dfs(u, 1 - c)

    for i in range(len(graph)):
        if not visited[i]:
            dfs(i, 0)
    # A graph is bipartite iff no edge connects two vertices of the same color
    for i in range(len(graph)):
        for j in graph[i]:
            if color[i] == color[j]:
                return False
    return True


# Adjacency list of graph
graph = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
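# A quick negative check: a triangle contains an odd cycle, so it is not bipartite.
triangle = {0: [1, 2], 1: [0, 2], 2: [0, 1]}
print(check_bipartite_dfs(triangle))  # False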
| 551
|
'''simple docstring'''
from typing import Union
import fire
import torch
from tqdm import tqdm
def convert(src_path: str, map_location: str = "cpu", save_path: Union[str, None] = None) -> None:
    """Convert every tensor in a saved state dict to fp16 and write it back out."""
    state_dict = torch.load(src_path, map_location=map_location)
    for k, v in tqdm(state_dict.items()):
        if not isinstance(v, torch.Tensor):
            raise TypeError("FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin")
        state_dict[k] = v.half()
    if save_path is None:  # overwrite src_path
        save_path = src_path
    torch.save(state_dict, save_path)
if __name__ == "__main__":
fire.Fire(convert)
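# A small smoke test for the converter above (file names are illustrative):
# it round-trips a toy state dict and checks that the dtype was halved.
def _smoke_test() -> None:
    sd = {"weight": torch.randn(2, 2)}
    torch.save(sd, "toy.bin")
    convert("toy.bin", save_path="toy_fp16.bin")
    assert torch.load("toy_fp16.bin")["weight"].dtype == torch.float16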
| 275
| 0
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from accelerate import PartialState
from accelerate.utils.operations import broadcast, gather, gather_object, pad_across_processes, reduce
def __lowercase( UpperCAmelCase__ ):
"""simple docstring"""
return (torch.arange(state.num_processes ) + 1.0 + (state.num_processes * state.process_index)).to(state.device )
def __lowercase( UpperCAmelCase__ ):
"""simple docstring"""
lowerCamelCase = create_tensor(UpperCAmelCase__ )
lowerCamelCase = gather(UpperCAmelCase__ )
assert gathered_tensor.tolist() == list(range(1 , state.num_processes**2 + 1 ) )
def __lowercase( UpperCAmelCase__ ):
"""simple docstring"""
lowerCamelCase = [state.process_index]
lowerCamelCase = gather_object(UpperCAmelCase__ )
assert len(UpperCAmelCase__ ) == state.num_processes, F"""{gathered_obj}, {len(UpperCAmelCase__ )} != {state.num_processes}"""
assert gathered_obj == list(range(state.num_processes ) ), F"""{gathered_obj} != {list(range(state.num_processes ) )}"""
def __lowercase( UpperCAmelCase__ ):
"""simple docstring"""
lowerCamelCase = create_tensor(UpperCAmelCase__ )
lowerCamelCase = broadcast(UpperCAmelCase__ )
assert broadcasted_tensor.shape == torch.Size([state.num_processes] )
assert broadcasted_tensor.tolist() == list(range(1 , state.num_processes + 1 ) )
def __lowercase( UpperCAmelCase__ ):
"""simple docstring"""
if state.is_main_process:
lowerCamelCase = torch.arange(state.num_processes + 1 ).to(state.device )
else:
lowerCamelCase = torch.arange(state.num_processes ).to(state.device )
lowerCamelCase = pad_across_processes(UpperCAmelCase__ )
assert padded_tensor.shape == torch.Size([state.num_processes + 1] )
if not state.is_main_process:
assert padded_tensor.tolist() == list(range(0 , state.num_processes ) ) + [0]
def __lowercase( UpperCAmelCase__ ):
"""simple docstring"""
if state.num_processes != 2:
return
lowerCamelCase = create_tensor(UpperCAmelCase__ )
lowerCamelCase = reduce(UpperCAmelCase__ , "sum" )
lowerCamelCase = torch.tensor([4.0, 6] ).to(state.device )
assert torch.allclose(UpperCAmelCase__ , UpperCAmelCase__ ), F"""{reduced_tensor} != {truth_tensor}"""
def __lowercase( UpperCAmelCase__ ):
"""simple docstring"""
if state.num_processes != 2:
return
lowerCamelCase = create_tensor(UpperCAmelCase__ )
lowerCamelCase = reduce(UpperCAmelCase__ , "mean" )
lowerCamelCase = torch.tensor([2.0, 3] ).to(state.device )
assert torch.allclose(UpperCAmelCase__ , UpperCAmelCase__ ), F"""{reduced_tensor} != {truth_tensor}"""
def __lowercase( UpperCAmelCase__ ):
"""simple docstring"""
main()
def __lowercase( ):
"""simple docstring"""
lowerCamelCase = PartialState()
state.print(F"""State: {state}""" )
state.print("testing gather" )
test_gather(UpperCAmelCase__ )
state.print("testing gather_object" )
test_gather_object(UpperCAmelCase__ )
state.print("testing broadcast" )
test_broadcast(UpperCAmelCase__ )
state.print("testing pad_across_processes" )
test_pad_across_processes(UpperCAmelCase__ )
state.print("testing reduce_sum" )
test_reduce_sum(UpperCAmelCase__ )
state.print("testing reduce_mean" )
test_reduce_mean(UpperCAmelCase__ )
if __name__ == "__main__":
main()
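# How such a script is typically launched (assumption: the file is saved as
# test_ops.py):
#   accelerate launch --num_processes 2 test_ops.py
# A single-process smoke run of the gather helper; with one process, gather()
# is effectively an identity operation.
def _single_process_smoke() -> None:
    state = PartialState()
    tensor = torch.arange(state.num_processes).to(state.device)
    print(gather(tensor))  # tensor([0]) when run with a single process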
| 484
|
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
a_ : int = {'configuration_van': ['VAN_PRETRAINED_CONFIG_ARCHIVE_MAP', 'VanConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : Dict = [
'VAN_PRETRAINED_MODEL_ARCHIVE_LIST',
'VanForImageClassification',
'VanModel',
'VanPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_van import VAN_PRETRAINED_CONFIG_ARCHIVE_MAP, VanConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_van import (
VAN_PRETRAINED_MODEL_ARCHIVE_LIST,
VanForImageClassification,
VanModel,
VanPreTrainedModel,
)
else:
import sys
a_ : Union[str, Any] = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 484
| 1
|
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import (
DiffusionPipeline,
UnCLIPImageVariationPipeline,
UnCLIPScheduler,
UNetaDConditionModel,
UNetaDModel,
)
from diffusers.pipelines.unclip.text_proj import UnCLIPTextProjModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, load_image, require_torch_gpu, skip_mps
from ..pipeline_params import IMAGE_VARIATION_BATCH_PARAMS, IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class __A ( UpperCamelCase__ , unittest.TestCase ):
a__ : Union[str, Any] = UnCLIPImageVariationPipeline
a__ : Tuple = IMAGE_VARIATION_PARAMS - {"""height""", """width""", """guidance_scale"""}
a__ : Union[str, Any] = IMAGE_VARIATION_BATCH_PARAMS
a__ : Union[str, Any] = [
"""generator""",
"""return_dict""",
"""decoder_num_inference_steps""",
"""super_res_num_inference_steps""",
]
a__ : Any = False
@property
def _lowercase (self : int ):
return 32
@property
def _lowercase (self : Union[str, Any] ):
return 32
@property
def _lowercase (self : Dict ):
return self.time_input_dim
@property
def _lowercase (self : Optional[Any] ):
return self.time_input_dim * 4
@property
def _lowercase (self : Tuple ):
return 100
@property
def _lowercase (self : List[str] ):
UpperCAmelCase_ = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
return tokenizer
@property
def _lowercase (self : Optional[int] ):
torch.manual_seed(0 )
UpperCAmelCase_ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModelWithProjection(__a )
@property
def _lowercase (self : Optional[int] ):
torch.manual_seed(0 )
UpperCAmelCase_ = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , intermediate_size=37 , patch_size=1 , )
return CLIPVisionModelWithProjection(__a )
@property
def _lowercase (self : Dict ):
torch.manual_seed(0 )
UpperCAmelCase_ = {
"clip_embeddings_dim": self.text_embedder_hidden_size,
"time_embed_dim": self.time_embed_dim,
"cross_attention_dim": self.cross_attention_dim,
}
UpperCAmelCase_ = UnCLIPTextProjModel(**__a )
return model
@property
def _lowercase (self : int ):
torch.manual_seed(0 )
UpperCAmelCase_ = {
"sample_size": 32,
# RGB in channels
"in_channels": 3,
# Out channels is double in channels because predicts mean and variance
"out_channels": 6,
"down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
"up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
"mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"layers_per_block": 1,
"cross_attention_dim": self.cross_attention_dim,
"attention_head_dim": 4,
"resnet_time_scale_shift": "scale_shift",
"class_embed_type": "identity",
}
UpperCAmelCase_ = UNetaDConditionModel(**__a )
return model
@property
def _lowercase (self : Union[str, Any] ):
return {
"sample_size": 64,
"layers_per_block": 1,
"down_block_types": ("ResnetDownsampleBlock2D", "ResnetDownsampleBlock2D"),
"up_block_types": ("ResnetUpsampleBlock2D", "ResnetUpsampleBlock2D"),
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"in_channels": 6,
"out_channels": 3,
}
@property
def _lowercase (self : int ):
torch.manual_seed(0 )
UpperCAmelCase_ = UNetaDModel(**self.dummy_super_res_kwargs )
return model
@property
def _lowercase (self : Dict ):
# seeded differently to get a different unet from `self.dummy_super_res_first`
torch.manual_seed(1 )
UpperCAmelCase_ = UNetaDModel(**self.dummy_super_res_kwargs )
return model
def _lowercase (self : Optional[Any] ):
UpperCAmelCase_ = self.dummy_decoder
UpperCAmelCase_ = self.dummy_text_proj
UpperCAmelCase_ = self.dummy_text_encoder
UpperCAmelCase_ = self.dummy_tokenizer
UpperCAmelCase_ = self.dummy_super_res_first
UpperCAmelCase_ = self.dummy_super_res_last
UpperCAmelCase_ = UnCLIPScheduler(
variance_type="learned_range" , prediction_type="epsilon" , num_train_timesteps=1000 , )
UpperCAmelCase_ = UnCLIPScheduler(
variance_type="fixed_small_log" , prediction_type="epsilon" , num_train_timesteps=1000 , )
UpperCAmelCase_ = CLIPImageProcessor(crop_size=32 , size=32 )
UpperCAmelCase_ = self.dummy_image_encoder
return {
"decoder": decoder,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"text_proj": text_proj,
"feature_extractor": feature_extractor,
"image_encoder": image_encoder,
"super_res_first": super_res_first,
"super_res_last": super_res_last,
"decoder_scheduler": decoder_scheduler,
"super_res_scheduler": super_res_scheduler,
}
def _lowercase (self : Optional[int] , __a : Any , __a : List[Any]=0 , __a : str=True ):
UpperCAmelCase_ = floats_tensor((1, 3, 32, 32) , rng=random.Random(__a ) ).to(__a )
if str(__a ).startswith("mps" ):
UpperCAmelCase_ = torch.manual_seed(__a )
else:
UpperCAmelCase_ = torch.Generator(device=__a ).manual_seed(__a )
if pil_image:
UpperCAmelCase_ = input_image * 0.5 + 0.5
UpperCAmelCase_ = input_image.clamp(0 , 1 )
UpperCAmelCase_ = input_image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
UpperCAmelCase_ = DiffusionPipeline.numpy_to_pil(__a )[0]
return {
"image": input_image,
"generator": generator,
"decoder_num_inference_steps": 2,
"super_res_num_inference_steps": 2,
"output_type": "np",
}
def _lowercase (self : Dict ):
UpperCAmelCase_ = "cpu"
UpperCAmelCase_ = self.get_dummy_components()
UpperCAmelCase_ = self.pipeline_class(**__a )
UpperCAmelCase_ = pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
UpperCAmelCase_ = self.get_dummy_inputs(__a , pil_image=__a )
UpperCAmelCase_ = pipe(**__a )
UpperCAmelCase_ = output.images
UpperCAmelCase_ = self.get_dummy_inputs(__a , pil_image=__a )
UpperCAmelCase_ = pipe(
**__a , return_dict=__a , )[0]
UpperCAmelCase_ = image[0, -3:, -3:, -1]
UpperCAmelCase_ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCAmelCase_ = np.array(
[
0.99_97,
0.00_02,
0.99_97,
0.99_97,
0.99_69,
0.00_23,
0.99_97,
0.99_69,
0.99_70,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def _lowercase (self : str ):
UpperCAmelCase_ = "cpu"
UpperCAmelCase_ = self.get_dummy_components()
UpperCAmelCase_ = self.pipeline_class(**__a )
UpperCAmelCase_ = pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
UpperCAmelCase_ = self.get_dummy_inputs(__a , pil_image=__a )
UpperCAmelCase_ = pipe(**__a )
UpperCAmelCase_ = output.images
UpperCAmelCase_ = self.get_dummy_inputs(__a , pil_image=__a )
UpperCAmelCase_ = pipe(
**__a , return_dict=__a , )[0]
UpperCAmelCase_ = image[0, -3:, -3:, -1]
UpperCAmelCase_ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCAmelCase_ = np.array([0.99_97, 0.00_03, 0.99_97, 0.99_97, 0.99_70, 0.00_24, 0.99_97, 0.99_71, 0.99_71] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def _lowercase (self : List[str] ):
UpperCAmelCase_ = "cpu"
UpperCAmelCase_ = self.get_dummy_components()
UpperCAmelCase_ = self.pipeline_class(**__a )
UpperCAmelCase_ = pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
UpperCAmelCase_ = self.get_dummy_inputs(__a , pil_image=__a )
UpperCAmelCase_ = [
pipeline_inputs["image"],
pipeline_inputs["image"],
]
UpperCAmelCase_ = pipe(**__a )
UpperCAmelCase_ = output.images
UpperCAmelCase_ = self.get_dummy_inputs(__a , pil_image=__a )
UpperCAmelCase_ = [
tuple_pipeline_inputs["image"],
tuple_pipeline_inputs["image"],
]
UpperCAmelCase_ = pipe(
**__a , return_dict=__a , )[0]
UpperCAmelCase_ = image[0, -3:, -3:, -1]
UpperCAmelCase_ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (2, 64, 64, 3)
UpperCAmelCase_ = np.array(
[
0.99_97,
0.99_89,
0.00_08,
0.00_21,
0.99_60,
0.00_18,
0.00_14,
0.00_02,
0.99_33,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def _lowercase (self : Tuple ):
UpperCAmelCase_ = torch.device("cpu" )
class __A :
a__ : Optional[Any] = 1
UpperCAmelCase_ = self.get_dummy_components()
UpperCAmelCase_ = self.pipeline_class(**__a )
UpperCAmelCase_ = pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
UpperCAmelCase_ = torch.Generator(device=__a ).manual_seed(0 )
UpperCAmelCase_ = pipe.decoder.dtype
UpperCAmelCase_ = 1
UpperCAmelCase_ = (
batch_size,
pipe.decoder.config.in_channels,
pipe.decoder.config.sample_size,
pipe.decoder.config.sample_size,
)
UpperCAmelCase_ = pipe.prepare_latents(
__a , dtype=__a , device=__a , generator=__a , latents=__a , scheduler=DummyScheduler() )
UpperCAmelCase_ = (
batch_size,
pipe.super_res_first.config.in_channels // 2,
pipe.super_res_first.config.sample_size,
pipe.super_res_first.config.sample_size,
)
UpperCAmelCase_ = pipe.prepare_latents(
__a , dtype=__a , device=__a , generator=__a , latents=__a , scheduler=DummyScheduler() )
UpperCAmelCase_ = self.get_dummy_inputs(__a , pil_image=__a )
img_out_1 = pipe(
    **__a , decoder_latents=__a , super_res_latents=__a ).images
UpperCAmelCase_ = self.get_dummy_inputs(__a , pil_image=__a )
# Don't pass the image; pass its embedding instead
UpperCAmelCase_ = pipeline_inputs.pop("image" )
UpperCAmelCase_ = pipe.image_encoder(__a ).image_embeds
img_out_2 = pipe(
    **__a , decoder_latents=__a , super_res_latents=__a , image_embeddings=__a , ).images
# make sure passing image embeddings manually gives an identical result
assert np.abs(img_out_1 - img_out_2 ).max() < 1E-4
@skip_mps
def _lowercase (self : Optional[int] ):
UpperCAmelCase_ = torch_device == "cpu"
# Check is relaxed because there is not a torch 2.0 sliced attention added kv processor
UpperCAmelCase_ = 1E-2
self._test_attention_slicing_forward_pass(
test_max_difference=__a , expected_max_diff=__a )
@skip_mps
def _lowercase (self : Optional[Any] ):
UpperCAmelCase_ = torch_device == "cpu"
UpperCAmelCase_ = True
UpperCAmelCase_ = [
"decoder_num_inference_steps",
"super_res_num_inference_steps",
]
self._test_inference_batch_single_identical(
test_max_difference=__a , relax_max_difference=__a , additional_params_copy_to_batched_inputs=__a , )
def _lowercase (self : List[Any] ):
UpperCAmelCase_ = [
"decoder_num_inference_steps",
"super_res_num_inference_steps",
]
if torch_device == "mps":
# TODO: MPS errors with larger batch sizes
UpperCAmelCase_ = [2, 3]
self._test_inference_batch_consistent(
batch_sizes=__a , additional_params_copy_to_batched_inputs=__a , )
else:
self._test_inference_batch_consistent(
additional_params_copy_to_batched_inputs=__a )
@skip_mps
def _lowercase (self : Optional[Any] ):
return super().test_dict_tuple_outputs_equivalent()
@skip_mps
def _lowercase (self : Optional[Any] ):
return super().test_save_load_local()
@skip_mps
def _lowercase (self : int ):
return super().test_save_load_optional_components()
@slow
@require_torch_gpu
class __A ( unittest.TestCase ):
def _lowercase (self : Dict ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowercase (self : Any ):
UpperCAmelCase_ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/unclip/cat.png" )
UpperCAmelCase_ = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/unclip/karlo_v1_alpha_cat_variation_fp16.npy" )
UpperCAmelCase_ = UnCLIPImageVariationPipeline.from_pretrained(
"kakaobrain/karlo-v1-alpha-image-variations" , torch_dtype=torch.floataa )
UpperCAmelCase_ = pipeline.to(__a )
pipeline.set_progress_bar_config(disable=__a )
UpperCAmelCase_ = torch.Generator(device="cpu" ).manual_seed(0 )
UpperCAmelCase_ = pipeline(
__a , generator=__a , output_type="np" , )
UpperCAmelCase_ = output.images[0]
assert image.shape == (256, 256, 3)
assert_mean_pixel_difference(__a , __a , 15 )
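# A hedged usage sketch mirroring the slow test above; the checkpoint and the
# image URL are the real ones referenced in the test, and a CUDA device is
# assumed to be available.
def _image_variation_demo():
    pipe = UnCLIPImageVariationPipeline.from_pretrained(
        "kakaobrain/karlo-v1-alpha-image-variations", torch_dtype=torch.float16
    ).to("cuda")
    image = load_image(
        "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/unclip/cat.png"
    )
    generator = torch.Generator(device="cpu").manual_seed(0)
    out = pipe(image, generator=generator, output_type="np")
    return out.images[0]  # shape (256, 256, 3)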
| 78
|
import warnings
from contextlib import contextmanager
from ....processing_utils import ProcessorMixin
class UpperCAmelCase_ ( UpperCamelCase_ ):
'''simple docstring'''
UpperCamelCase__ : Optional[int] = '''MCTCTFeatureExtractor'''
UpperCamelCase__ : Union[str, Any] = '''AutoTokenizer'''
def __init__( self , _A , _A ):
'''simple docstring'''
super().__init__(_A , _A )
__SCREAMING_SNAKE_CASE = self.feature_extractor
__SCREAMING_SNAKE_CASE = False
def __call__( self , *_A , **_A ):
'''simple docstring'''
if self._in_target_context_manager:
return self.current_processor(*_A , **_A )
if "raw_speech" in kwargs:
warnings.warn('Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.' )
__SCREAMING_SNAKE_CASE = kwargs.pop('raw_speech' )
else:
__SCREAMING_SNAKE_CASE = kwargs.pop('audio' , _A )
__SCREAMING_SNAKE_CASE = kwargs.pop('sampling_rate' , _A )
__SCREAMING_SNAKE_CASE = kwargs.pop('text' , _A )
if len(_A ) > 0:
__SCREAMING_SNAKE_CASE = args[0]
__SCREAMING_SNAKE_CASE = args[1:]
if audio is None and text is None:
raise ValueError('You need to specify either an `audio` or `text` input to process.' )
if audio is not None:
__SCREAMING_SNAKE_CASE = self.feature_extractor(_A , *_A , sampling_rate=_A , **_A )
if text is not None:
__SCREAMING_SNAKE_CASE = self.tokenizer(_A , **_A )
if text is None:
return inputs
elif audio is None:
return encodings
else:
__SCREAMING_SNAKE_CASE = encodings['input_ids']
return inputs
def _A ( self , *_A , **_A ):
'''simple docstring'''
return self.tokenizer.batch_decode(*_A , **_A )
def _A ( self , *_A , **_A ):
'''simple docstring'''
if self._in_target_context_manager:
return self.current_processor.pad(*_A , **_A )
__SCREAMING_SNAKE_CASE = kwargs.pop('input_features' , _A )
__SCREAMING_SNAKE_CASE = kwargs.pop('labels' , _A )
if len(_A ) > 0:
__SCREAMING_SNAKE_CASE = args[0]
__SCREAMING_SNAKE_CASE = args[1:]
if input_features is not None:
__SCREAMING_SNAKE_CASE = self.feature_extractor.pad(_A , *_A , **_A )
if labels is not None:
__SCREAMING_SNAKE_CASE = self.tokenizer.pad(_A , **_A )
if labels is None:
return input_features
elif input_features is None:
return labels
else:
__SCREAMING_SNAKE_CASE = labels['input_ids']
return input_features
def _A ( self , *_A , **_A ):
'''simple docstring'''
return self.tokenizer.decode(*_A , **_A )
@contextmanager
def _A ( self ):
'''simple docstring'''
warnings.warn(
'`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '
'labels by using the argument `text` of the regular `__call__` method (either in the same call as '
'your audio inputs, or in a separate call).' )
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = self.tokenizer
yield
__SCREAMING_SNAKE_CASE = self.feature_extractor
__SCREAMING_SNAKE_CASE = False
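# A hedged usage sketch of the processor's two branches above (audio goes
# through the feature extractor, text through the tokenizer).
# "speechbrain/m-ctc-t-large" is the real M-CTC-T checkpoint; note the model
# is deprecated in recent transformers releases, so this assumes a version
# that still ships it.
def _mctct_demo() -> None:
    import numpy as np
    from transformers import MCTCTProcessor

    processor = MCTCTProcessor.from_pretrained("speechbrain/m-ctc-t-large")
    speech = np.zeros(16_000, dtype=np.float32)  # one second of silence
    inputs = processor(audio=speech, sampling_rate=16_000, return_tensors="pt")
    labels = processor(text="hello world", return_tensors="pt")
    print(inputs.keys(), labels.keys())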
| 148
| 0
|
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class _A ( _lowercase ):
'''simple docstring'''
_snake_case : Optional[int] = ["""image_processor""", """tokenizer"""]
_snake_case : Tuple = """CLIPImageProcessor"""
_snake_case : Dict = ("""CLIPTokenizer""", """CLIPTokenizerFast""")
def __init__( self : List[Any] , lowerCamelCase : Optional[Any]=None , lowerCamelCase : List[Any]=None , **lowerCamelCase : Optional[int] ):
'''simple docstring'''
__lowercase = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , lowerCamelCase , )
__lowercase = kwargs.pop("feature_extractor" )
__lowercase = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
super().__init__(lowerCamelCase , lowerCamelCase )
def __call__( self : Dict , lowerCamelCase : Dict=None , lowerCamelCase : Tuple=None , lowerCamelCase : Any=None , **lowerCamelCase : Any ):
'''simple docstring'''
if text is None and images is None:
raise ValueError("You have to specify either text or images. Both cannot be none." )
if text is not None:
__lowercase = self.tokenizer(lowerCamelCase , return_tensors=lowerCamelCase , **lowerCamelCase )
if images is not None:
__lowercase = self.image_processor(lowerCamelCase , return_tensors=lowerCamelCase , **lowerCamelCase )
if text is not None and images is not None:
__lowercase = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**lowerCamelCase ) , tensor_type=lowerCamelCase )
def _snake_case ( self : List[str] , *lowerCamelCase : Tuple , **lowerCamelCase : str ):
'''simple docstring'''
return self.tokenizer.batch_decode(*lowerCamelCase , **lowerCamelCase )
def _snake_case ( self : Any , *lowerCamelCase : Union[str, Any] , **lowerCamelCase : List[Any] ):
'''simple docstring'''
return self.tokenizer.decode(*lowerCamelCase , **lowerCamelCase )
@property
def _snake_case ( self : List[str] ):
'''simple docstring'''
__lowercase = self.tokenizer.model_input_names
__lowercase = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def _snake_case ( self : str ):
'''simple docstring'''
warnings.warn(
"`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , lowerCamelCase , )
return self.image_processor_class
@property
def _snake_case ( self : Optional[int] ):
'''simple docstring'''
warnings.warn(
"`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , lowerCamelCase , )
return self.image_processor
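# A hedged usage sketch for the processor above, matching CLIPProcessor in
# transformers; the checkpoint is the real openai/clip-vit-base-patch32 and
# the image is synthetic.
def _clip_processor_demo() -> None:
    import numpy as np
    from PIL import Image
    from transformers import CLIPProcessor

    processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
    image = Image.fromarray(np.zeros((224, 224, 3), dtype=np.uint8))
    batch = processor(text=["a photo of a cat"], images=image, return_tensors="pt")
    print(sorted(batch.keys()))  # ['attention_mask', 'input_ids', 'pixel_values']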
| 655
|
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
snake_case__ : List[str] = logging.get_logger(__name__)
snake_case__ : Optional[Any] = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt"""}
# See all LED models at https://huggingface.co/models?filter=LED
snake_case__ : Optional[Any] = {
"""vocab_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json""",
},
"""merges_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt""",
},
"""tokenizer_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json""",
},
}
snake_case__ : List[str] = {
"""allenai/led-base-16384""": 1_63_84,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def snake_case_ ( ):
__lowercase = (
list(range(ord("!" ) , ord("~" ) + 1 ) ) + list(range(ord("¡" ) , ord("¬" ) + 1 ) ) + list(range(ord("®" ) , ord("ÿ" ) + 1 ) )
)
__lowercase = bs[:]
__lowercase = 0
for b in range(2**8 ):
if b not in bs:
bs.append(_SCREAMING_SNAKE_CASE )
cs.append(2**8 + n )
n += 1
__lowercase = [chr(_SCREAMING_SNAKE_CASE ) for n in cs]
return dict(zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
def snake_case_ ( _SCREAMING_SNAKE_CASE ):
__lowercase = set()
__lowercase = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
__lowercase = char
return pairs
class _A ( _lowercase ):
'''simple docstring'''
_snake_case : List[str] = VOCAB_FILES_NAMES
_snake_case : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
_snake_case : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_snake_case : Union[str, Any] = ["""input_ids""", """attention_mask"""]
def __init__( self : List[str] , lowerCamelCase : Tuple , lowerCamelCase : Tuple , lowerCamelCase : Optional[int]="replace" , lowerCamelCase : Dict="<s>" , lowerCamelCase : Dict="</s>" , lowerCamelCase : Optional[Any]="</s>" , lowerCamelCase : Any="<s>" , lowerCamelCase : List[str]="<unk>" , lowerCamelCase : Union[str, Any]="<pad>" , lowerCamelCase : Any="<mask>" , lowerCamelCase : str=False , **lowerCamelCase : Optional[Any] , ):
'''simple docstring'''
__lowercase = AddedToken(lowerCamelCase , lstrip=lowerCamelCase , rstrip=lowerCamelCase ) if isinstance(lowerCamelCase , lowerCamelCase ) else bos_token
__lowercase = AddedToken(lowerCamelCase , lstrip=lowerCamelCase , rstrip=lowerCamelCase ) if isinstance(lowerCamelCase , lowerCamelCase ) else eos_token
__lowercase = AddedToken(lowerCamelCase , lstrip=lowerCamelCase , rstrip=lowerCamelCase ) if isinstance(lowerCamelCase , lowerCamelCase ) else sep_token
__lowercase = AddedToken(lowerCamelCase , lstrip=lowerCamelCase , rstrip=lowerCamelCase ) if isinstance(lowerCamelCase , lowerCamelCase ) else cls_token
__lowercase = AddedToken(lowerCamelCase , lstrip=lowerCamelCase , rstrip=lowerCamelCase ) if isinstance(lowerCamelCase , lowerCamelCase ) else unk_token
__lowercase = AddedToken(lowerCamelCase , lstrip=lowerCamelCase , rstrip=lowerCamelCase ) if isinstance(lowerCamelCase , lowerCamelCase ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
__lowercase = AddedToken(lowerCamelCase , lstrip=lowerCamelCase , rstrip=lowerCamelCase ) if isinstance(lowerCamelCase , lowerCamelCase ) else mask_token
super().__init__(
errors=lowerCamelCase , bos_token=lowerCamelCase , eos_token=lowerCamelCase , unk_token=lowerCamelCase , sep_token=lowerCamelCase , cls_token=lowerCamelCase , pad_token=lowerCamelCase , mask_token=lowerCamelCase , add_prefix_space=lowerCamelCase , **lowerCamelCase , )
with open(lowerCamelCase , encoding="utf-8" ) as vocab_handle:
__lowercase = json.load(lowerCamelCase )
__lowercase = {v: k for k, v in self.encoder.items()}
__lowercase = errors # how to handle errors in decoding
__lowercase = bytes_to_unicode()
__lowercase = {v: k for k, v in self.byte_encoder.items()}
with open(lowerCamelCase , encoding="utf-8" ) as merges_handle:
__lowercase = merges_handle.read().split("\n" )[1:-1]
__lowercase = [tuple(merge.split() ) for merge in bpe_merges]
__lowercase = dict(zip(lowerCamelCase , range(len(lowerCamelCase ) ) ) )
__lowercase = {}
__lowercase = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
__lowercase = re.compile(R"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" )
@property
# Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
def _snake_case ( self : Optional[int] ):
'''simple docstring'''
return len(self.encoder )
def _snake_case ( self : Optional[int] ):
'''simple docstring'''
return dict(self.encoder , **self.added_tokens_encoder )
def _snake_case ( self : List[Any] , lowerCamelCase : str ):
'''simple docstring'''
if token in self.cache:
return self.cache[token]
__lowercase = tuple(lowerCamelCase )
__lowercase = get_pairs(lowerCamelCase )
if not pairs:
return token
while True:
__lowercase = min(lowerCamelCase , key=lambda lowerCamelCase : self.bpe_ranks.get(lowerCamelCase , float("inf" ) ) )
if bigram not in self.bpe_ranks:
break
__lowercase , __lowercase = bigram
__lowercase = []
__lowercase = 0
while i < len(lowerCamelCase ):
try:
__lowercase = word.index(lowerCamelCase , lowerCamelCase )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
__lowercase = j
if word[i] == first and i < len(lowerCamelCase ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
__lowercase = tuple(lowerCamelCase )
__lowercase = new_word
if len(lowerCamelCase ) == 1:
break
else:
__lowercase = get_pairs(lowerCamelCase )
__lowercase = " ".join(lowerCamelCase )
__lowercase = word
return word
def _snake_case ( self : List[Any] , lowerCamelCase : Tuple ):
'''simple docstring'''
__lowercase = []
for token in re.findall(self.pat , lowerCamelCase ):
__lowercase = "".join(
self.byte_encoder[b] for b in token.encode("utf-8" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(lowerCamelCase ).split(" " ) )
return bpe_tokens
def _snake_case ( self : Dict , lowerCamelCase : Optional[int] ):
'''simple docstring'''
return self.encoder.get(lowerCamelCase , self.encoder.get(self.unk_token ) )
def _snake_case ( self : str , lowerCamelCase : Optional[Any] ):
'''simple docstring'''
return self.decoder.get(lowerCamelCase )
def _snake_case ( self : Union[str, Any] , lowerCamelCase : int ):
'''simple docstring'''
__lowercase = "".join(lowerCamelCase )
__lowercase = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8" , errors=self.errors )
return text
def _snake_case ( self : Optional[Any] , lowerCamelCase : str , lowerCamelCase : Optional[str] = None ):
'''simple docstring'''
if not os.path.isdir(lowerCamelCase ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
__lowercase = os.path.join(
lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
__lowercase = os.path.join(
lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
with open(lowerCamelCase , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=lowerCamelCase , ensure_ascii=lowerCamelCase ) + "\n" )
__lowercase = 0
with open(lowerCamelCase , "w" , encoding="utf-8" ) as writer:
writer.write("#version: 0.2\n" )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
" Please check that the tokenizer is not corrupted!" )
__lowercase = token_index
writer.write(" ".join(lowerCamelCase ) + "\n" )
index += 1
return vocab_file, merge_file
def _snake_case ( self : Tuple , lowerCamelCase : List[int] , lowerCamelCase : Optional[List[int]] = None ):
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
__lowercase = [self.cls_token_id]
__lowercase = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def _snake_case ( self : str , lowerCamelCase : List[int] , lowerCamelCase : Optional[List[int]] = None , lowerCamelCase : bool = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCamelCase , token_ids_a=lowerCamelCase , already_has_special_tokens=lowerCamelCase )
if token_ids_a is None:
return [1] + ([0] * len(lowerCamelCase )) + [1]
return [1] + ([0] * len(lowerCamelCase )) + [1, 1] + ([0] * len(lowerCamelCase )) + [1]
def _snake_case ( self : int , lowerCamelCase : List[int] , lowerCamelCase : Optional[List[int]] = None ):
'''simple docstring'''
__lowercase = [self.sep_token_id]
__lowercase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _snake_case ( self : Dict , lowerCamelCase : Any , lowerCamelCase : Tuple=False , **lowerCamelCase : Any ):
'''simple docstring'''
__lowercase = kwargs.pop("add_prefix_space" , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(lowerCamelCase ) > 0 and not text[0].isspace()):
__lowercase = " " + text
return (text, kwargs)
def _snake_case ( self : List[Any] , lowerCamelCase : Union[Dict[str, EncodedInput], BatchEncoding] , lowerCamelCase : Optional[int] = None , lowerCamelCase : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , lowerCamelCase : Optional[int] = None , lowerCamelCase : Optional[bool] = None , ):
'''simple docstring'''
__lowercase = super()._pad(
encoded_inputs=lowerCamelCase , max_length=lowerCamelCase , padding_strategy=lowerCamelCase , pad_to_multiple_of=lowerCamelCase , return_attention_mask=lowerCamelCase , )
# Load from model defaults
if return_attention_mask is None:
__lowercase = "attention_mask" in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
__lowercase = encoded_inputs[self.model_input_names[0]]
# `global_attention_mask` needs to have the same length as the other (sequential) inputs.
__lowercase = len(encoded_inputs["global_attention_mask"] ) != len(lowerCamelCase )
if needs_to_be_padded:
__lowercase = len(lowerCamelCase ) - len(encoded_inputs["global_attention_mask"] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
__lowercase = (
encoded_inputs["global_attention_mask"] + [-1] * difference
)
elif self.padding_side == "left":
__lowercase = [-1] * difference + encoded_inputs[
"global_attention_mask"
]
else:
raise ValueError("Invalid padding strategy:" + str(self.padding_side ) )
return encoded_inputs
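# A hedged usage sketch for the global_attention_mask padding handled above;
# "allenai/led-base-16384" is the real checkpoint referenced in this file.
def _led_demo() -> None:
    import torch
    from transformers import LEDTokenizer

    tok = LEDTokenizer.from_pretrained("allenai/led-base-16384")
    enc = tok("a long document ...", return_tensors="pt")
    enc["global_attention_mask"] = torch.zeros_like(enc["input_ids"])
    enc["global_attention_mask"][:, 0] = 1  # global attention on <s>, as LED expects
    print(enc["global_attention_mask"])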
| 655
| 1
|
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class lowerCAmelCase_ :
UpperCamelCase_ :Optional[Union[str, Path]] = None
UpperCamelCase_ :bool = False
UpperCamelCase_ :bool = False
UpperCamelCase_ :bool = False
UpperCamelCase_ :Optional[Dict] = None
UpperCamelCase_ :Optional[str] = None
UpperCamelCase_ :bool = False
UpperCamelCase_ :bool = False
UpperCamelCase_ :bool = False
UpperCamelCase_ :bool = True
UpperCamelCase_ :Optional[int] = None
UpperCamelCase_ :int = 1
UpperCamelCase_ :Optional[Union[str, bool]] = None
UpperCamelCase_ :bool = False
UpperCamelCase_ :Optional[Dict] = None
UpperCamelCase_ :Optional[str] = None
def __snake_case ( self : Any ):
return self.__class__(**{k: copy.deepcopy(v ) for k, v in self.__dict__.items()} )
| 668
|
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
lowerCAmelCase : Union[str, Any] = logging.get_logger(__name__)
class __lowercase ( UpperCAmelCase_ ):
"""simple docstring"""
_UpperCAmelCase : List[Any] = '''upernet'''
def __init__( self : Any , lowerCAmelCase__ : Union[str, Any]=None , lowerCAmelCase__ : List[str]=512 , lowerCAmelCase__ : Any=0.02 , lowerCAmelCase__ : str=[1, 2, 3, 6] , lowerCAmelCase__ : Optional[Any]=True , lowerCAmelCase__ : Dict=0.4 , lowerCAmelCase__ : int=384 , lowerCAmelCase__ : Union[str, Any]=256 , lowerCAmelCase__ : Any=1 , lowerCAmelCase__ : Tuple=False , lowerCAmelCase__ : List[str]=255 , **lowerCAmelCase__ : List[str] , ):
super().__init__(**lowerCAmelCase__)
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
SCREAMING_SNAKE_CASE_: Dict = CONFIG_MAPPING["resnet"](out_features=["stage1", "stage2", "stage3", "stage4"])
elif isinstance(lowerCAmelCase__ , lowerCAmelCase__):
SCREAMING_SNAKE_CASE_: str = backbone_config.get("model_type")
SCREAMING_SNAKE_CASE_: str = CONFIG_MAPPING[backbone_model_type]
SCREAMING_SNAKE_CASE_: Tuple = config_class.from_dict(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: str = backbone_config
SCREAMING_SNAKE_CASE_: Optional[Any] = hidden_size
SCREAMING_SNAKE_CASE_: Dict = initializer_range
SCREAMING_SNAKE_CASE_: Any = pool_scales
SCREAMING_SNAKE_CASE_: Optional[Any] = use_auxiliary_head
SCREAMING_SNAKE_CASE_: str = auxiliary_loss_weight
SCREAMING_SNAKE_CASE_: List[Any] = auxiliary_in_channels
SCREAMING_SNAKE_CASE_: Union[str, Any] = auxiliary_channels
SCREAMING_SNAKE_CASE_: Dict = auxiliary_num_convs
SCREAMING_SNAKE_CASE_: str = auxiliary_concat_input
SCREAMING_SNAKE_CASE_: Dict = loss_ignore_index
def _SCREAMING_SNAKE_CASE ( self : Tuple):
SCREAMING_SNAKE_CASE_: Tuple = copy.deepcopy(self.__dict__)
SCREAMING_SNAKE_CASE_: int = self.backbone_config.to_dict()
SCREAMING_SNAKE_CASE_: Optional[int] = self.__class__.model_type
return output
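# A hedged usage sketch for the config above; both classes are real
# transformers classes, and the default backbone is ResNet as coded in
# __init__.
def _upernet_demo() -> None:
    from transformers import UperNetConfig, UperNetForSemanticSegmentation

    config = UperNetConfig()
    model = UperNetForSemanticSegmentation(config)
    print(config.backbone_config.model_type)  # "resnet"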
| 671
| 0
|
'''simple docstring'''
import numpy as np
from transformers import BatchFeature
from transformers.testing_utils import require_tf, require_torch
from .test_feature_extraction_common import FeatureExtractionSavingTestMixin
class UpperCAmelCase_ ( __A ):
"""simple docstring"""
UpperCamelCase_ = None
UpperCamelCase_ = None
@property
def A__ ( self : Tuple ) -> Dict:
'''simple docstring'''
return self.feat_extract_tester.prepare_feat_extract_dict()
def A__ ( self : Dict ) -> List[str]:
'''simple docstring'''
lowercase : Optional[int] =self.feature_extraction_class(**self.feat_extract_dict )
self.assertTrue(hasattr(UpperCAmelCase , '''feature_size''' ) )
self.assertTrue(hasattr(UpperCAmelCase , '''sampling_rate''' ) )
self.assertTrue(hasattr(UpperCAmelCase , '''padding_value''' ) )
def A__ ( self : List[str] ) -> Dict:
'''simple docstring'''
lowercase : int =self.feat_extract_tester.prepare_inputs_for_common()
lowercase : List[Any] =self.feature_extraction_class(**self.feat_extract_dict )
lowercase : Dict =feat_extract.model_input_names[0]
lowercase : Union[str, Any] =BatchFeature({input_name: speech_inputs} )
self.assertTrue(all(len(UpperCAmelCase ) == len(UpperCAmelCase ) for x, y in zip(UpperCAmelCase , processed_features[input_name] ) ) )
lowercase : Optional[Any] =self.feat_extract_tester.prepare_inputs_for_common(equal_length=UpperCAmelCase )
lowercase : List[Any] =BatchFeature({input_name: speech_inputs} , tensor_type='''np''' )
lowercase : int =processed_features[input_name]
if len(batch_features_input.shape ) < 3:
lowercase : Optional[Any] =batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
@require_torch
def A__ ( self : str ) -> Optional[int]:
'''simple docstring'''
lowercase : Optional[int] =self.feat_extract_tester.prepare_inputs_for_common(equal_length=UpperCAmelCase )
lowercase : Dict =self.feature_extraction_class(**self.feat_extract_dict )
lowercase : Union[str, Any] =feat_extract.model_input_names[0]
lowercase : List[str] =BatchFeature({input_name: speech_inputs} , tensor_type='''pt''' )
lowercase : Tuple =processed_features[input_name]
if len(batch_features_input.shape ) < 3:
lowercase : Tuple =batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
@require_tf
def A__ ( self : List[str] ) -> Union[str, Any]:
'''simple docstring'''
lowercase : int =self.feat_extract_tester.prepare_inputs_for_common(equal_length=UpperCAmelCase )
lowercase : List[Any] =self.feature_extraction_class(**self.feat_extract_dict )
lowercase : Tuple =feat_extract.model_input_names[0]
lowercase : str =BatchFeature({input_name: speech_inputs} , tensor_type='''tf''' )
lowercase : Dict =processed_features[input_name]
if len(batch_features_input.shape ) < 3:
lowercase : List[str] =batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
def A__ ( self : int , UpperCAmelCase : List[Any]=False ) -> str:
'''simple docstring'''
def _inputs_have_equal_length(UpperCAmelCase : List[str] ):
lowercase : Tuple =len(input[0] )
for input_slice in input[1:]:
if len(UpperCAmelCase ) != length:
return False
return True
def _inputs_are_equal(UpperCAmelCase : List[str] , UpperCAmelCase : Any ):
if len(UpperCAmelCase ) != len(UpperCAmelCase ):
return False
for input_slice_a, input_slice_a in zip(UpperCAmelCase , UpperCAmelCase ):
if not np.allclose(np.asarray(UpperCAmelCase ) , np.asarray(UpperCAmelCase ) , atol=1e-3 ):
return False
return True
lowercase : str =self.feature_extraction_class(**self.feat_extract_dict )
lowercase : Optional[int] =self.feat_extract_tester.prepare_inputs_for_common(numpify=UpperCAmelCase )
lowercase : List[str] =feat_extract.model_input_names[0]
lowercase : Any =BatchFeature({input_name: speech_inputs} )
lowercase : List[str] =self.feat_extract_tester.seq_length_diff
lowercase : Union[str, Any] =self.feat_extract_tester.max_seq_length + pad_diff
lowercase : Union[str, Any] =self.feat_extract_tester.min_seq_length
lowercase : Tuple =self.feat_extract_tester.batch_size
lowercase : List[str] =self.feat_extract_tester.feature_size
# test padding for List[int] + numpy
lowercase : Union[str, Any] =feat_extract.pad(UpperCAmelCase , padding=UpperCAmelCase )
lowercase : str =input_a[input_name]
lowercase : List[str] =feat_extract.pad(UpperCAmelCase , padding='''longest''' )
lowercase : Any =input_a[input_name]
lowercase : Tuple =feat_extract.pad(UpperCAmelCase , padding='''max_length''' , max_length=len(speech_inputs[-1] ) )
lowercase : Union[str, Any] =input_a[input_name]
lowercase : Tuple =feat_extract.pad(UpperCAmelCase , padding='''longest''' , return_tensors='''np''' )
lowercase : str =input_a[input_name]
# max_length parameter has to be provided when setting `padding="max_length"`
with self.assertRaises(UpperCAmelCase ):
feat_extract.pad(UpperCAmelCase , padding='''max_length''' )[input_name]
lowercase : List[Any] =feat_extract.pad(
UpperCAmelCase , padding='''max_length''' , max_length=UpperCAmelCase , return_tensors='''np''' )
lowercase : Union[str, Any] =input_a[input_name]
self.assertFalse(_inputs_have_equal_length(UpperCAmelCase ) )
self.assertTrue(_inputs_have_equal_length(UpperCAmelCase ) )
self.assertTrue(_inputs_have_equal_length(UpperCAmelCase ) )
self.assertTrue(_inputs_are_equal(UpperCAmelCase , UpperCAmelCase ) )
self.assertTrue(len(input_a[0] ) == pad_min_length )
self.assertTrue(len(input_a[1] ) == pad_min_length + pad_diff )
self.assertTrue(input_a.shape[:2] == (batch_size, len(input_a[0] )) )
self.assertTrue(input_a.shape[:2] == (batch_size, pad_max_length) )
if feature_size > 1:
self.assertTrue(input_a.shape[2] == input_a.shape[2] == feature_size )
# test padding for `pad_to_multiple_of` for List[int] + numpy
lowercase : str =feat_extract.pad(UpperCAmelCase , pad_to_multiple_of=10 )
lowercase : int =input_a[input_name]
lowercase : Any =feat_extract.pad(UpperCAmelCase , padding='''longest''' , pad_to_multiple_of=10 )
lowercase : List[Any] =input_a[input_name]
lowercase : Union[str, Any] =feat_extract.pad(
UpperCAmelCase , padding='''max_length''' , pad_to_multiple_of=10 , max_length=UpperCAmelCase )
lowercase : int =input_a[input_name]
lowercase : Dict =feat_extract.pad(
UpperCAmelCase , padding='''max_length''' , pad_to_multiple_of=10 , max_length=UpperCAmelCase , return_tensors='''np''' , )
lowercase : Any =input_a[input_name]
self.assertTrue(all(len(UpperCAmelCase ) % 10 == 0 for x in input_a ) )
self.assertTrue(_inputs_are_equal(UpperCAmelCase , UpperCAmelCase ) )
lowercase : Union[str, Any] =pad_max_length if pad_max_length % 10 == 0 else (pad_max_length // 10 + 1) * 10
self.assertTrue(all(len(UpperCAmelCase ) == expected_mult_pad_length for x in input_a ) )
self.assertEqual(input_a.shape[:2] , (batch_size, expected_mult_pad_length) )
if feature_size > 1:
self.assertTrue(input_a.shape[2] == feature_size )
# Check padding value is correct
lowercase : Union[str, Any] =(np.ones(self.feat_extract_tester.feature_size ) * feat_extract.padding_value).sum()
self.assertTrue(
abs(np.asarray(input_a[0] )[pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) )
< 1e-3 )
self.assertTrue(
abs(
np.asarray(input_a[1] )[pad_min_length + pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - pad_diff) )
< 1e-3 )
self.assertTrue(
abs(
np.asarray(input_a[2] )[pad_min_length + 2 * pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - 2 * pad_diff) )
< 1e-3 )
self.assertTrue(
abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) ) < 1e-3 )
self.assertTrue(
abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (expected_mult_pad_length - pad_min_length) )
< 1e-3 )
def A__ ( self : Optional[int] , UpperCAmelCase : Optional[int]=False ) -> Optional[Any]:
'''simple docstring'''
def _inputs_have_equal_length(UpperCAmelCase : int ):
lowercase : List[Any] =len(input[0] )
for input_slice in input[1:]:
if len(UpperCAmelCase ) != length:
return False
return True
def _inputs_are_equal(UpperCAmelCase : Dict , UpperCAmelCase : Any ):
if len(UpperCAmelCase ) != len(UpperCAmelCase ):
return False
for input_slice_a, input_slice_a in zip(UpperCAmelCase , UpperCAmelCase ):
if not np.allclose(np.asarray(UpperCAmelCase ) , np.asarray(UpperCAmelCase ) , atol=1e-3 ):
return False
return True
lowercase : Dict =self.feature_extraction_class(**self.feat_extract_dict )
lowercase : Union[str, Any] =self.feat_extract_tester.prepare_inputs_for_common(numpify=UpperCAmelCase )
lowercase : List[str] =feat_extract.model_input_names[0]
lowercase : Union[str, Any] =BatchFeature({input_name: speech_inputs} )
# truncate to smallest
lowercase : Union[str, Any] =feat_extract.pad(
UpperCAmelCase , padding='''max_length''' , max_length=len(speech_inputs[0] ) , truncation=UpperCAmelCase )
lowercase : List[str] =input_a[input_name]
lowercase : str =feat_extract.pad(UpperCAmelCase , padding='''max_length''' , max_length=len(speech_inputs[0] ) )
lowercase : Tuple =input_a[input_name]
self.assertTrue(_inputs_have_equal_length(UpperCAmelCase ) )
self.assertFalse(_inputs_have_equal_length(UpperCAmelCase ) )
# truncate to smallest with np
lowercase : Any =feat_extract.pad(
UpperCAmelCase , padding='''max_length''' , max_length=len(speech_inputs[0] ) , return_tensors='''np''' , truncation=UpperCAmelCase , )
lowercase : Union[str, Any] =input_a[input_name]
lowercase : List[Any] =feat_extract.pad(
UpperCAmelCase , padding='''max_length''' , max_length=len(speech_inputs[0] ) , return_tensors='''np''' )
lowercase : int =input_a[input_name]
self.assertTrue(_inputs_have_equal_length(UpperCAmelCase ) )
self.assertTrue(input_a.shape[1] == len(speech_inputs[0] ) )
# since truncation forces padding to be smaller than longest input
# function can't return `np.ndarray`, but has to return list
self.assertFalse(_inputs_have_equal_length(UpperCAmelCase ) )
# truncate to middle
lowercase : Optional[Any] =feat_extract.pad(
UpperCAmelCase , padding='''max_length''' , max_length=len(speech_inputs[1] ) , truncation=UpperCAmelCase , return_tensors='''np''' , )
lowercase : Union[str, Any] =input_a[input_name]
lowercase : int =feat_extract.pad(
UpperCAmelCase , padding='''max_length''' , max_length=len(speech_inputs[1] ) , truncation=UpperCAmelCase )
lowercase : str =input_a[input_name]
lowercase : str =feat_extract.pad(
UpperCAmelCase , padding='''max_length''' , max_length=len(speech_inputs[1] ) , return_tensors='''np''' )
lowercase : Tuple =input_a[input_name]
self.assertTrue(input_a.shape[1] == len(speech_inputs[1] ) )
self.assertTrue(_inputs_have_equal_length(UpperCAmelCase ) )
self.assertTrue(_inputs_have_equal_length(UpperCAmelCase ) )
self.assertTrue(_inputs_are_equal(UpperCAmelCase , UpperCAmelCase ) )
# since truncation forces padding to be smaller than longest input
# function can't return `np.ndarray`, but has to return list
self.assertFalse(_inputs_have_equal_length(UpperCAmelCase ) )
self.assertTrue(len(input_a[-1] ) == len(speech_inputs[-1] ) )
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(UpperCAmelCase ):
feat_extract.pad(UpperCAmelCase , truncation=UpperCAmelCase )[input_name]
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(UpperCAmelCase ):
feat_extract.pad(UpperCAmelCase , padding='''longest''' , truncation=UpperCAmelCase )[input_name]
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(UpperCAmelCase ):
feat_extract.pad(UpperCAmelCase , padding='''longest''' , truncation=UpperCAmelCase )[input_name]
# max_length parameter has to be provided when setting `truncation=True` and padding="max_length"
with self.assertRaises(UpperCAmelCase ):
feat_extract.pad(UpperCAmelCase , padding='''max_length''' , truncation=UpperCAmelCase )[input_name]
# test truncation for `pad_to_multiple_of` for List[int] + numpy
lowercase : int =12
lowercase : List[str] =feat_extract.pad(
UpperCAmelCase , padding='''max_length''' , max_length=len(speech_inputs[0] ) , pad_to_multiple_of=UpperCAmelCase , truncation=UpperCAmelCase , )
lowercase : int =input_a[input_name]
lowercase : Tuple =feat_extract.pad(
UpperCAmelCase , padding='''max_length''' , max_length=len(speech_inputs[0] ) , pad_to_multiple_of=UpperCAmelCase , )
lowercase : Dict =input_a[input_name]
# retrieve expected_length as multiple of pad_to_multiple_of
lowercase : Dict =len(speech_inputs[0] )
if expected_length % pad_to_multiple_of != 0:
lowercase : List[Any] =((len(speech_inputs[0] ) // pad_to_multiple_of) + 1) * pad_to_multiple_of
self.assertTrue(len(input_a[0] ) == expected_length )
self.assertTrue(_inputs_have_equal_length(UpperCAmelCase ) )
self.assertFalse(_inputs_have_equal_length(UpperCAmelCase ) )
def A__ ( self : str ) -> int:
'''simple docstring'''
self._check_padding(numpify=UpperCAmelCase )
def A__ ( self : Tuple ) -> Dict:
'''simple docstring'''
self._check_padding(numpify=UpperCAmelCase )
def A__ ( self : Any ) -> Any:
'''simple docstring'''
self._check_truncation(numpify=UpperCAmelCase )
def A__ ( self : Any ) -> Any:
'''simple docstring'''
self._check_truncation(numpify=UpperCAmelCase )
@require_torch
def A__ ( self : Any ) -> int:
'''simple docstring'''
lowercase : Optional[Any] =self.feature_extraction_class(**self.feat_extract_dict )
lowercase : int =self.feat_extract_tester.prepare_inputs_for_common()
lowercase : str =feat_extract.model_input_names[0]
lowercase : Optional[int] =BatchFeature({input_name: speech_inputs} )
lowercase : Optional[Any] =feat_extract.pad(UpperCAmelCase , padding='''longest''' , return_tensors='''np''' )[input_name]
lowercase : List[str] =feat_extract.pad(UpperCAmelCase , padding='''longest''' , return_tensors='''pt''' )[input_name]
self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_pt.numpy().astype(np.floataa ).sum() ) < 1e-2 )
@require_tf
def A__ ( self : int ) -> List[Any]:
'''simple docstring'''
lowercase : Union[str, Any] =self.feature_extraction_class(**self.feat_extract_dict )
lowercase : Tuple =self.feat_extract_tester.prepare_inputs_for_common()
lowercase : Optional[Any] =feat_extract.model_input_names[0]
lowercase : int =BatchFeature({input_name: speech_inputs} )
lowercase : str =feat_extract.pad(UpperCAmelCase , padding='''longest''' , return_tensors='''np''' )[input_name]
lowercase : List[str] =feat_extract.pad(UpperCAmelCase , padding='''longest''' , return_tensors='''tf''' )[input_name]
self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_tf.numpy().astype(np.floataa ).sum() ) < 1e-2 )
def A__ ( self : str ) -> List[Any]:
'''simple docstring'''
lowercase : str =self.feat_extract_dict
lowercase : Union[str, Any] =True
lowercase : List[str] =self.feature_extraction_class(**UpperCAmelCase )
lowercase : Optional[int] =self.feat_extract_tester.prepare_inputs_for_common()
lowercase : int =[len(UpperCAmelCase ) for x in speech_inputs]
lowercase : Tuple =feat_extract.model_input_names[0]
lowercase : Dict =BatchFeature({input_name: speech_inputs} )
lowercase : str =feat_extract.pad(UpperCAmelCase , padding='''longest''' , return_tensors='''np''' )
self.assertIn('''attention_mask''' , UpperCAmelCase )
self.assertListEqual(list(processed.attention_mask.shape ) , list(processed[input_name].shape[:2] ) )
self.assertListEqual(processed.attention_mask.sum(-1 ).tolist() , UpperCAmelCase )
def A__ ( self : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
lowercase : Dict =self.feat_extract_dict
lowercase : List[str] =True
lowercase : Optional[Any] =self.feature_extraction_class(**UpperCAmelCase )
lowercase : Any =self.feat_extract_tester.prepare_inputs_for_common()
lowercase : Optional[Any] =[len(UpperCAmelCase ) for x in speech_inputs]
lowercase : Tuple =feat_extract.model_input_names[0]
lowercase : Optional[Any] =BatchFeature({input_name: speech_inputs} )
lowercase : List[str] =min(UpperCAmelCase )
lowercase : Any =feat_extract.pad(
UpperCAmelCase , padding='''max_length''' , max_length=UpperCAmelCase , truncation=UpperCAmelCase , return_tensors='''np''' )
self.assertIn('''attention_mask''' , UpperCAmelCase )
self.assertListEqual(
list(processed_pad.attention_mask.shape ) , [processed_pad[input_name].shape[0], max_length] )
self.assertListEqual(
processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist() , [max_length for x in speech_inputs] )
| 721
|
'''simple docstring'''
from __future__ import annotations
from collections.abc import Generator
import requests
from bs4 import BeautifulSoup

url = "https://www.indeed.co.in/jobs?q=mobile+app+development&l="


def fetch_jobs(location: str = "mumbai") -> Generator[tuple[str, str], None, None]:
    soup = BeautifulSoup(requests.get(url + location).content, "html.parser")
    # This attribute finds out all the specifics listed in a job
    for job in soup.find_all("div", attrs={"data-tn-component": "organicJob"}):
        job_title = job.find("a", attrs={"data-tn-element": "jobTitle"}).text.strip()
        company_name = job.find("span", {"class": "company"}).text.strip()
        yield job_title, company_name
if __name__ == "__main__":
for i, job in enumerate(fetch_jobs('Bangalore'), 1):
print(f"""Job {i:>2} is {job[0]} at {job[1]}""")
| 8
| 0
|
def cocktail_shaker_sort(unsorted: list) -> list:
    """Sort the list in place by sweeping alternately right-to-left and left-to-right."""
    for i in range(len(unsorted) - 1, 0, -1):
        swapped = False
        for j in range(i, 0, -1):
            if unsorted[j] < unsorted[j - 1]:
                unsorted[j], unsorted[j - 1] = unsorted[j - 1], unsorted[j]
                swapped = True
        for j in range(i):
            if unsorted[j] > unsorted[j + 1]:
                unsorted[j], unsorted[j + 1] = unsorted[j + 1], unsorted[j]
                swapped = True
        if not swapped:
            break
    return unsorted


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(f"{cocktail_shaker_sort(unsorted) = }")
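# Quick sanity check (illustrative values): each pass sweeps in both directions,
# bubbling an extreme value to each end, so one call fully sorts the list.
# >>> cocktail_shaker_sort([4, 5, 2, 1, 2])
# [1, 2, 2, 4, 5]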
| 201
|
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class Edge:
    destination_vertex: int
    weight: int


class AdjacencyList:
    def __init__(self, size: int) -> None:
        self._graph: list[list[Edge]] = [[] for _ in range(size)]
        self._size = size

    def __getitem__(self, vertex: int) -> Iterator[Edge]:
        return iter(self._graph[vertex])

    @property
    def size(self) -> int:
        return self._size

    def add_edge(self, from_vertex: int, to_vertex: int, weight: int) -> None:
        if weight not in (0, 1):
            raise ValueError("Edge weight must be either 0 or 1.")
        if to_vertex < 0 or to_vertex >= self.size:
            raise ValueError("Vertex indexes must be in [0; size).")
        self._graph[from_vertex].append(Edge(to_vertex, weight))

    def get_shortest_path(self, start_vertex: int, finish_vertex: int) -> int | None:
        # 0-1 BFS: 0-weight edges go to the front of the deque and 1-weight
        # edges to the back, so vertices are popped in non-decreasing distance order.
        queue = deque([start_vertex])
        distances: list[int | None] = [None] * self.size
        distances[start_vertex] = 0

        while queue:
            current_vertex = queue.popleft()
            current_distance = distances[current_vertex]
            if current_distance is None:
                continue

            for edge in self[current_vertex]:
                new_distance = current_distance + edge.weight
                dest_vertex_distance = distances[edge.destination_vertex]
                if (
                    isinstance(dest_vertex_distance, int)
                    and new_distance >= dest_vertex_distance
                ):
                    continue
                distances[edge.destination_vertex] = new_distance
                if edge.weight == 0:
                    queue.appendleft(edge.destination_vertex)
                else:
                    queue.append(edge.destination_vertex)

        if distances[finish_vertex] is None:
            raise ValueError("No path from start_vertex to finish_vertex.")

        return distances[finish_vertex]
if __name__ == "__main__":
import doctest
doctest.testmod()
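# Minimal usage sketch (values are illustrative): the zero-weight edge 0 -> 1
# lets the route 0 -> 1 -> 2 -> 3 cost only 2 in total.
example_graph = AdjacencyList(4)
example_graph.add_edge(0, 1, 0)
example_graph.add_edge(1, 2, 1)
example_graph.add_edge(2, 3, 1)
assert example_graph.get_shortest_path(0, 3) == 2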
| 201
| 1
|
'''simple docstring'''
from __future__ import annotations
def extended_euclid(a: int, b: int) -> tuple[int, int]:
    """Return (x, y) such that a*x + b*y == gcd(a, b)."""
    if b == 0:
        return (1, 0)
    (x, y) = extended_euclid(b, a % b)
    k = a // b
    return (y, x - k * y)


def chinese_remainder_theorem(n1: int, r1: int, n2: int, r2: int) -> int:
    """Return the smallest non-negative n with n % n1 == r1 and n % n2 == r2
    (n1 and n2 must be coprime)."""
    (x, y) = extended_euclid(n1, n2)
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m


def invert_modulo(a: int, n: int) -> int:
    """Return the multiplicative inverse of a modulo n."""
    (b, x) = extended_euclid(a, n)
    if b < 0:
        b = (b % n + n) % n
    return b


def chinese_remainder_theorem2(n1: int, r1: int, n2: int, r2: int) -> int:
    """Same result as chinese_remainder_theorem, built from modular inverses."""
    x, y = invert_modulo(n1, n2), invert_modulo(n2, n1)
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m
if __name__ == "__main__":
from doctest import testmod
testmod(name="chinese_remainder_theorem", verbose=True)
testmod(name="chinese_remainder_theorem2", verbose=True)
testmod(name="invert_modulo", verbose=True)
testmod(name="extended_euclid", verbose=True)
| 532
|
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, TransformeraDModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class a ( _SCREAMING_SNAKE_CASE , unittest.TestCase ):
_lowerCAmelCase = DiTPipeline
_lowerCAmelCase = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
_lowerCAmelCase = PipelineTesterMixin.required_optional_params - {
"""latents""",
"""num_images_per_prompt""",
"""callback""",
"""callback_steps""",
}
_lowerCAmelCase = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
_lowerCAmelCase = False
def __UpperCAmelCase ( self ) -> Dict:
torch.manual_seed(0 )
_a = TransformeraDModel(
sample_size=16 , num_layers=2 , patch_size=4 , attention_head_dim=8 , num_attention_heads=2 , in_channels=4 , out_channels=8 , attention_bias=__magic_name__ , activation_fn='gelu-approximate' , num_embeds_ada_norm=10_00 , norm_type='ada_norm_zero' , norm_elementwise_affine=__magic_name__ , )
_a = AutoencoderKL()
_a = DDIMScheduler()
_a = {'transformer': transformer.eval(), 'vae': vae.eval(), 'scheduler': scheduler}
return components
def __UpperCAmelCase ( self , __magic_name__ , __magic_name__=0 ) -> Union[str, Any]:
if str(__magic_name__ ).startswith('mps' ):
_a = torch.manual_seed(__magic_name__ )
else:
_a = torch.Generator(device=__magic_name__ ).manual_seed(__magic_name__ )
_a = {
'class_labels': [1],
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
def __UpperCAmelCase ( self ) -> Tuple:
_a = 'cpu'
_a = self.get_dummy_components()
_a = self.pipeline_class(**__magic_name__ )
pipe.to(__magic_name__ )
pipe.set_progress_bar_config(disable=__magic_name__ )
_a = self.get_dummy_inputs(__magic_name__ )
_a = pipe(**__magic_name__ ).images
_a = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 16, 16, 3) )
_a = np.array([0.2_9_4_6, 0.6_6_0_1, 0.4_3_2_9, 0.3_2_9_6, 0.4_1_4_4, 0.5_3_1_9, 0.7_2_7_3, 0.5_0_1_3, 0.4_4_5_7] )
_a = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(__magic_name__ , 1e-3 )
def __UpperCAmelCase ( self ) -> Any:
self._test_inference_batch_single_identical(relax_max_difference=__magic_name__ , expected_max_diff=1e-3 )
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def __UpperCAmelCase ( self ) -> Any:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
@require_torch_gpu
@slow
class a ( unittest.TestCase ):
def __UpperCAmelCase ( self ) -> List[str]:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __UpperCAmelCase ( self ) -> Union[str, Any]:
_a = torch.manual_seed(0 )
_a = DiTPipeline.from_pretrained('facebook/DiT-XL-2-256' )
pipe.to('cuda' )
_a = ['vase', 'umbrella', 'white shark', 'white wolf']
_a = pipe.get_label_ids(__magic_name__ )
_a = pipe(__magic_name__ , generator=__magic_name__ , num_inference_steps=40 , output_type='np' ).images
for word, image in zip(__magic_name__ , __magic_name__ ):
_a = load_numpy(
f'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy' )
assert np.abs((expected_image - image).max() ) < 1e-2
def __UpperCAmelCase ( self ) -> Any:
_a = DiTPipeline.from_pretrained('facebook/DiT-XL-2-512' )
_a = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.to('cuda' )
_a = ['vase', 'umbrella']
_a = pipe.get_label_ids(__magic_name__ )
_a = torch.manual_seed(0 )
_a = pipe(__magic_name__ , generator=__magic_name__ , num_inference_steps=25 , output_type='np' ).images
for word, image in zip(__magic_name__ , __magic_name__ ):
_a = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
f'/dit/{word}_512.npy' )
assert np.abs((expected_image - image).max() ) < 1e-1
| 532
| 1
|
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNet2DModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class ScoreSdeVePipeline(DiffusionPipeline):
    unet: UNet2DModel
    scheduler: ScoreSdeVeScheduler

    def __init__(self, unet: UNet2DModel, scheduler: ScoreSdeVeScheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 2000,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[ImagePipelineOutput, Tuple]:
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)

        model = self.unet

        sample = randn_tensor(shape, generator=generator) * self.scheduler.init_noise_sigma
        sample = sample.to(self.device)

        self.scheduler.set_timesteps(num_inference_steps)
        self.scheduler.set_sigmas(num_inference_steps)

        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
            sigma_t = self.scheduler.sigmas[i] * torch.ones(shape[0], device=self.device)

            # correction step
            for _ in range(self.scheduler.config.correct_steps):
                model_output = self.unet(sample, sigma_t).sample
                sample = self.scheduler.step_correct(model_output, sample, generator=generator).prev_sample

            # prediction step
            model_output = model(sample, sigma_t).sample
            output = self.scheduler.step_pred(model_output, t, sample, generator=generator)

            sample, sample_mean = output.prev_sample, output.prev_sample_mean

        sample = sample_mean.clamp(0, 1)
        sample = sample.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            sample = self.numpy_to_pil(sample)

        if not return_dict:
            return (sample,)

        return ImagePipelineOutput(images=sample)
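# Hypothetical usage sketch (the checkpoint name is an assumption, not from this file):
# pipe = ScoreSdeVePipeline.from_pretrained("google/ncsnpp-ffhq-256")
# image = pipe(num_inference_steps=2000).images[0]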
| 19
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_table_transformer": [
        "TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "TableTransformerConfig",
        "TableTransformerOnnxConfig",
    ]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_table_transformer"] = [
        "TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TableTransformerForObjectDetection",
        "TableTransformerModel",
        "TableTransformerPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TableTransformerConfig,
TableTransformerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TableTransformerForObjectDetection,
TableTransformerModel,
TableTransformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 69
| 0
|
"""simple docstring"""
import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def retrieve(class_prompt, class_data_dir, num_class_images):
    factor = 1.5
    num_images = int(factor * num_class_images)
    client = ClipClient(
        url="https://knn.laion.ai/knn-service", indice_name="laion_400m", num_images=num_images, aesthetic_weight=0.1
    )

    os.makedirs(f"{class_data_dir}/images", exist_ok=True)
    if len(list(Path(f"{class_data_dir}/images").iterdir())) >= num_class_images:
        return

    while True:
        class_images = client.query(text=class_prompt)
        if len(class_images) >= factor * num_class_images or num_images > 1e4:
            break
        else:
            num_images = int(factor * num_images)
            client = ClipClient(
                url="https://knn.laion.ai/knn-service",
                indice_name="laion_400m",
                num_images=num_images,
                aesthetic_weight=0.1,
            )

    count = 0
    total = 0
    pbar = tqdm(desc="downloading real regularization images", total=num_class_images)

    with open(f"{class_data_dir}/caption.txt", "w") as fa, open(f"{class_data_dir}/urls.txt", "w") as fa2, open(
        f"{class_data_dir}/images.txt", "w"
    ) as fa3:
        while total < num_class_images:
            images = class_images[count]
            count += 1
            try:
                img = requests.get(images["url"])
                if img.status_code == 200:
                    _ = Image.open(BytesIO(img.content))
                    with open(f"{class_data_dir}/images/{total}.jpg", "wb") as f:
                        f.write(img.content)
                    fa.write(images["caption"] + "\n")
                    fa2.write(images["url"] + "\n")
                    fa3.write(f"{class_data_dir}/images/{total}.jpg" + "\n")
                    total += 1
                    pbar.update(1)
                else:
                    continue
            except Exception:
                continue
    return


def parse_args():
    parser = argparse.ArgumentParser("", add_help=False)
    parser.add_argument("--class_prompt", help="text prompt to retrieve images", required=True, type=str)
    parser.add_argument("--class_data_dir", help="path to save images", required=True, type=str)
    parser.add_argument("--num_class_images", help="number of images to download", default=200, type=int)
    return parser.parse_args()


if __name__ == "__main__":
    args = parse_args()
    retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
| 500
|
"""simple docstring"""
def binomial_coefficient(n, r):
    c = [0 for i in range(r + 1)]
    # nc0 = 1
    c[0] = 1
    for i in range(1, n + 1):
        # to compute current row from previous row.
        j = min(i, r)
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]


print(binomial_coefficient(n=10, r=5))
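# Sanity check: the in-place update is Pascal's rule C(i, j) = C(i-1, j) + C(i-1, j-1),
# applied right-to-left so each row can overwrite the previous one.
assert binomial_coefficient(n=10, r=5) == 252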
| 500
| 1
|
import torch
from transformers import AutoModel
class FSNERModel(torch.nn.Module):
    def __init__(self, pretrained_model_name_or_path="sayef/fsner-bert-base-uncased"):
        super(FSNERModel, self).__init__()

        self.bert = AutoModel.from_pretrained(pretrained_model_name_or_path, return_dict=True)
        self.cos = torch.nn.CosineSimilarity(3, 1e-08)
        self.softmax = torch.nn.Softmax(dim=1)

    def BERT(self, **inputs):
        return self.bert(**inputs).last_hidden_state

    def VectorSum(self, token_embeddings):
        return token_embeddings.sum(2, keepdim=True)

    def Atten(self, q_rep, S_rep, T=1):
        return self.softmax(T * self.cos(q_rep, S_rep))

    def forward(self, W_query, W_supports):
        """Score each support token as a potential entity start/end for the query."""
        support_sizes = W_supports["sizes"].tolist()
        start_token_id = W_supports["start_token_id"].item()
        end_token_id = W_supports["end_token_id"].item()

        del W_supports["sizes"]
        del W_supports["start_token_id"]
        del W_supports["end_token_id"]

        q = self.BERT(**W_query)
        S = self.BERT(**W_supports)

        p_starts = None
        p_ends = None

        start_token_masks = W_supports["input_ids"] == start_token_id
        end_token_masks = W_supports["input_ids"] == end_token_id

        for i, size in enumerate(support_sizes):
            if i == 0:
                s = 0
            else:
                s = support_sizes[i - 1]

            s_start = S[s : s + size][start_token_masks[s : s + size]]
            s_end = S[s : s + size][end_token_masks[s : s + size]]

            p_start = torch.matmul(q[i], s_start.T).sum(1).softmax(0)
            p_end = torch.matmul(q[i], s_end.T).sum(1).softmax(0)

            if p_starts is not None:
                p_starts = torch.vstack((p_starts, p_start))
                p_ends = torch.vstack((p_ends, p_end))
            else:
                p_starts = p_start
                p_ends = p_end

        return p_starts, p_ends
| 226
|
from typing import TYPE_CHECKING
from ....utils import _LazyModule
_import_structure = {"tokenization_tapex": ["TapexTokenizer"]}
if TYPE_CHECKING:
from .tokenization_tapex import TapexTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 226
| 1
|
"""simple docstring"""
def _print_dist(dist, v):
    print("\nThe shortest path matrix using Floyd Warshall algorithm\n")
    for i in range(v):
        for j in range(v):
            if dist[i][j] != float("inf"):
                print(int(dist[i][j]), end="\t")
            else:
                print("INF", end="\t")
        print()


def floyd_warshall(graph, v):
    dist = [[float("inf") for _ in range(v)] for _ in range(v)]

    for i in range(v):
        for j in range(v):
            dist[i][j] = graph[i][j]

    # check vertex k against all other vertices (i, j)
    for k in range(v):
        # looping through rows of graph array
        for i in range(v):
            # looping through columns of graph array
            for j in range(v):
                if (
                    dist[i][k] != float("inf")
                    and dist[k][j] != float("inf")
                    and dist[i][k] + dist[k][j] < dist[i][j]
                ):
                    dist[i][j] = dist[i][k] + dist[k][j]

    _print_dist(dist, v)
    return dist, v


if __name__ == "__main__":
    v = int(input("Enter number of vertices: "))
    e = int(input("Enter number of edges: "))

    graph = [[float("inf") for i in range(v)] for j in range(v)]

    for i in range(v):
        graph[i][i] = 0.0

    # src and dst are indices that must be within the array size graph[e][v]
    # failure to follow this will result in an error
    for i in range(e):
        print("\nEdge ", i + 1)
        src = int(input("Enter source:"))
        dst = int(input("Enter destination:"))
        weight = float(input("Enter weight:"))
        graph[src][dst] = weight

    floyd_warshall(graph, v)
# Example Input
# Enter number of vertices: 3
# Enter number of edges: 2
# # generated graph from vertex and edge inputs
# [[inf, inf, inf], [inf, inf, inf], [inf, inf, inf]]
# [[0.0, inf, inf], [inf, 0.0, inf], [inf, inf, 0.0]]
# specify source, destination and weight for edge #1
# Edge 1
# Enter source:1
# Enter destination:2
# Enter weight:2
# specify source, destination and weight for edge #2
# Edge 2
# Enter source:2
# Enter destination:1
# Enter weight:1
# # Expected Output from the vertice, edge and src, dst, weight inputs!!
# 0 INF INF
# INF 0 2
# INF 1 0
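# Non-interactive equivalent of the session above (with INF = float("inf")):
# graph = [[0.0, INF, INF], [INF, 0.0, 2.0], [INF, 1.0, 0.0]]
# floyd_warshall(graph, 3)   # prints the 0/INF matrix shown in the comments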
| 721
|
"""simple docstring"""
import unittest
from knapsack import knapsack as k
class TestKnapsack(unittest.TestCase):
    def test_base_case(self):
        cap = 0
        val = [0]
        w = [0]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 0)

        val = [60]
        w = [10]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 0)

    def test_easy_case(self):
        cap = 3
        val = [1, 2, 3]
        w = [3, 2, 1]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 5)

    def test_knapsack(self):
        cap = 50
        val = [60, 100, 120]
        w = [10, 20, 30]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 220)
if __name__ == "__main__":
unittest.main()
| 63
| 0
|
import gc
import unittest
from transformers import CTRLConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
)
class lowerCamelCase_ :
'''simple docstring'''
def __init__( self : Optional[Any] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : List[Any]=14 , _lowerCAmelCase : int=7 , _lowerCAmelCase : Union[str, Any]=True , _lowerCAmelCase : Dict=True , _lowerCAmelCase : Optional[int]=True , _lowerCAmelCase : Optional[Any]=True , _lowerCAmelCase : Any=True , _lowerCAmelCase : List[Any]=99 , _lowerCAmelCase : Dict=32 , _lowerCAmelCase : Optional[int]=5 , _lowerCAmelCase : int=4 , _lowerCAmelCase : Union[str, Any]=37 , _lowerCAmelCase : List[str]="gelu" , _lowerCAmelCase : Any=0.1 , _lowerCAmelCase : str=0.1 , _lowerCAmelCase : Tuple=512 , _lowerCAmelCase : Dict=16 , _lowerCAmelCase : Any=2 , _lowerCAmelCase : Tuple=0.02 , _lowerCAmelCase : List[Any]=3 , _lowerCAmelCase : Union[str, Any]=4 , _lowerCAmelCase : List[str]=None , ):
SCREAMING_SNAKE_CASE_ = parent
SCREAMING_SNAKE_CASE_ = batch_size
SCREAMING_SNAKE_CASE_ = seq_length
SCREAMING_SNAKE_CASE_ = is_training
SCREAMING_SNAKE_CASE_ = use_token_type_ids
SCREAMING_SNAKE_CASE_ = use_input_mask
SCREAMING_SNAKE_CASE_ = use_labels
SCREAMING_SNAKE_CASE_ = use_mc_token_ids
SCREAMING_SNAKE_CASE_ = vocab_size
SCREAMING_SNAKE_CASE_ = hidden_size
SCREAMING_SNAKE_CASE_ = num_hidden_layers
SCREAMING_SNAKE_CASE_ = num_attention_heads
SCREAMING_SNAKE_CASE_ = intermediate_size
SCREAMING_SNAKE_CASE_ = hidden_act
SCREAMING_SNAKE_CASE_ = hidden_dropout_prob
SCREAMING_SNAKE_CASE_ = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_ = max_position_embeddings
SCREAMING_SNAKE_CASE_ = type_vocab_size
SCREAMING_SNAKE_CASE_ = type_sequence_label_size
SCREAMING_SNAKE_CASE_ = initializer_range
SCREAMING_SNAKE_CASE_ = num_labels
SCREAMING_SNAKE_CASE_ = num_choices
SCREAMING_SNAKE_CASE_ = scope
SCREAMING_SNAKE_CASE_ = self.vocab_size - 1
def lowerCAmelCase_ ( self : Any ):
SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE_ = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE_ = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE_ = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
SCREAMING_SNAKE_CASE_ = None
if self.use_mc_token_ids:
SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size, self.num_choices] , self.seq_length )
SCREAMING_SNAKE_CASE_ = None
SCREAMING_SNAKE_CASE_ = None
SCREAMING_SNAKE_CASE_ = None
if self.use_labels:
SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size] , self.num_choices )
SCREAMING_SNAKE_CASE_ = self.get_config()
SCREAMING_SNAKE_CASE_ = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
input_mask,
head_mask,
token_type_ids,
mc_token_ids,
sequence_labels,
token_labels,
choice_labels,
)
def lowerCAmelCase_ ( self : Optional[int] ):
return CTRLConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
def lowerCAmelCase_ ( self : str , _lowerCAmelCase : str , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : List[str] , _lowerCAmelCase : str , _lowerCAmelCase : Optional[int] , *_lowerCAmelCase : int ):
SCREAMING_SNAKE_CASE_ = CTRLModel(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
model(_lowerCAmelCase , token_type_ids=_lowerCAmelCase , head_mask=_lowerCAmelCase )
model(_lowerCAmelCase , token_type_ids=_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = model(_lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(len(result.past_key_values ) , config.n_layer )
def lowerCAmelCase_ ( self : str , _lowerCAmelCase : Any , _lowerCAmelCase : Any , _lowerCAmelCase : int , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Dict , *_lowerCAmelCase : Union[str, Any] ):
SCREAMING_SNAKE_CASE_ = CTRLLMHeadModel(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
SCREAMING_SNAKE_CASE_ = model(_lowerCAmelCase , token_type_ids=_lowerCAmelCase , labels=_lowerCAmelCase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCAmelCase_ ( self : List[str] ):
SCREAMING_SNAKE_CASE_ = self.prepare_config_and_inputs()
(
(
SCREAMING_SNAKE_CASE_
) , (
SCREAMING_SNAKE_CASE_
) , (
SCREAMING_SNAKE_CASE_
) , (
SCREAMING_SNAKE_CASE_
) , (
SCREAMING_SNAKE_CASE_
) , (
SCREAMING_SNAKE_CASE_
) , (
SCREAMING_SNAKE_CASE_
) , (
SCREAMING_SNAKE_CASE_
) , (
SCREAMING_SNAKE_CASE_
) ,
) = config_and_inputs
SCREAMING_SNAKE_CASE_ = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'head_mask': head_mask}
return config, inputs_dict
def lowerCAmelCase_ ( self : Union[str, Any] , _lowerCAmelCase : Any , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Tuple , _lowerCAmelCase : str , *_lowerCAmelCase : List[str] ):
SCREAMING_SNAKE_CASE_ = self.num_labels
SCREAMING_SNAKE_CASE_ = CTRLForSequenceClassification(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE_ = model(_lowerCAmelCase , token_type_ids=_lowerCAmelCase , labels=_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
@require_torch
class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowercase_ = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else ()
lowercase_ = (CTRLLMHeadModel,) if is_torch_available() else ()
lowercase_ = (
{
"feature-extraction": CTRLModel,
"text-classification": CTRLForSequenceClassification,
"text-generation": CTRLLMHeadModel,
"zero-shot": CTRLForSequenceClassification,
}
if is_torch_available()
else {}
)
lowercase_ = True
lowercase_ = False
lowercase_ = False
def lowerCAmelCase_ ( self : Dict , _lowerCAmelCase : List[str] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : str , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Optional[Any] ):
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny
# config could not be created.
return True
return False
def lowerCAmelCase_ ( self : Tuple ):
SCREAMING_SNAKE_CASE_ = CTRLModelTester(self )
SCREAMING_SNAKE_CASE_ = ConfigTester(self , config_class=_lowerCAmelCase , n_embd=37 )
def lowerCAmelCase_ ( self : int ):
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase_ ( self : Optional[Any] ):
self.config_tester.run_common_tests()
def lowerCAmelCase_ ( self : List[str] ):
SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_ctrl_model(*_lowerCAmelCase )
def lowerCAmelCase_ ( self : List[str] ):
SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*_lowerCAmelCase )
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def lowerCAmelCase_ ( self : int ):
pass
@slow
def lowerCAmelCase_ ( self : str ):
for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE_ = CTRLModel.from_pretrained(_lowerCAmelCase )
self.assertIsNotNone(_lowerCAmelCase )
@unittest.skip('The model doesn\'t support left padding' ) # and it's not used enough to be worth fixing :)
def lowerCAmelCase_ ( self : int ):
pass
@require_torch
class lowerCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase_ ( self : str ):
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
@slow
def lowerCAmelCase_ ( self : int ):
SCREAMING_SNAKE_CASE_ = CTRLLMHeadModel.from_pretrained('ctrl' )
model.to(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = torch.tensor(
[[11_859, 0, 1_611, 8]] , dtype=torch.long , device=_lowerCAmelCase ) # Legal the president is
SCREAMING_SNAKE_CASE_ = [
11_859,
0,
1_611,
8,
5,
150,
26_449,
2,
19,
348,
469,
3,
2_595,
48,
20_740,
246_533,
246_533,
19,
30,
5,
] # Legal the president is a good guy and I don't want to lose my job. \n \n I have a
SCREAMING_SNAKE_CASE_ = model.generate(_lowerCAmelCase , do_sample=_lowerCAmelCase )
self.assertListEqual(output_ids[0].tolist() , _lowerCAmelCase )
| 31
|
"""simple docstring"""
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
UpperCAmelCase = logging.get_logger(__name__)
UpperCAmelCase = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""}
UpperCAmelCase = {
"""tokenizer_file""": {
"""EleutherAI/gpt-neox-20b""": """https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json""",
},
}
UpperCAmelCase = {
"""gpt-neox-20b""": 2_048,
}
class UpperCAmelCase_ ( _lowercase):
snake_case__ = VOCAB_FILES_NAMES
snake_case__ = PRETRAINED_VOCAB_FILES_MAP
snake_case__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case__ = ['''input_ids''', '''attention_mask''']
def __init__( self : Dict , __UpperCamelCase : int=None , __UpperCamelCase : Any=None , __UpperCamelCase : List[str]=None , __UpperCamelCase : Tuple="<|endoftext|>" , __UpperCamelCase : int="<|endoftext|>" , __UpperCamelCase : Dict="<|endoftext|>" , __UpperCamelCase : Union[str, Any]=False , **__UpperCamelCase : Union[str, Any] , ) -> Any:
super().__init__(
__UpperCamelCase , __UpperCamelCase , tokenizer_file=__UpperCamelCase , unk_token=__UpperCamelCase , bos_token=__UpperCamelCase , eos_token=__UpperCamelCase , add_prefix_space=__UpperCamelCase , **__UpperCamelCase , )
_UpperCamelCase = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('''add_prefix_space''' , __UpperCamelCase ) != add_prefix_space:
_UpperCamelCase = getattr(__UpperCamelCase , pre_tok_state.pop('''type''' ) )
_UpperCamelCase = add_prefix_space
_UpperCamelCase = pre_tok_class(**__UpperCamelCase )
_UpperCamelCase = add_prefix_space
def _UpperCamelCase ( self : List[Any] , __UpperCamelCase : str , __UpperCamelCase : Optional[str] = None ) -> Tuple[str]:
_UpperCamelCase = self._tokenizer.model.save(__UpperCamelCase , name=__UpperCamelCase )
return tuple(__UpperCamelCase )
def _UpperCamelCase ( self : List[str] , __UpperCamelCase : "Conversation" ) -> List[int]:
_UpperCamelCase = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(__UpperCamelCase , add_special_tokens=__UpperCamelCase ) + [self.eos_token_id] )
if len(__UpperCamelCase ) > self.model_max_length:
_UpperCamelCase = input_ids[-self.model_max_length :]
return input_ids
| 420
| 0
|
'''simple docstring'''
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
    from transformers import AutoTokenizer, FlaxMT5ForConditionalGeneration
    from transformers.models.t5.modeling_flax_t5 import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class MT5IntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        model = FlaxMT5ForConditionalGeneration.from_pretrained("google/mt5-small")
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="np").input_ids
        labels = tokenizer("Hi I am", return_tensors="np").input_ids

        decoder_input_ids = shift_tokens_right(labels, model.config.pad_token_id, model.config.decoder_start_token_id)

        logits = model(input_ids, decoder_input_ids=decoder_input_ids).logits
        loss = optax.softmax_cross_entropy(logits, onehot(labels, logits.shape[-1])).mean()

        mtf_score = -(labels.shape[-1] * loss.item())

        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
| 711
|
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures
_a = logging.get_logger(__name__)
@dataclass
class __A :
'''simple docstring'''
lowerCAmelCase_ = field(metadata={"""help""": """The name of the task to train on: """ + """, """.join(glue_processors.keys() )} )
lowerCAmelCase_ = field(
metadata={"""help""": """The input data dir. Should contain the .tsv files (or other data files) for the task."""} )
lowerCAmelCase_ = field(
default=128 , metadata={
"""help""": (
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
} , )
lowerCAmelCase_ = field(
default=lowerCAmelCase , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
def __lowerCamelCase ( self ):
'''simple docstring'''
lowerCamelCase__ = self.task_name.lower()
class __A ( lowerCAmelCase ):
'''simple docstring'''
lowerCAmelCase_ = """train"""
lowerCAmelCase_ = """dev"""
lowerCAmelCase_ = """test"""
class __A ( lowerCAmelCase ):
'''simple docstring'''
lowerCAmelCase_ = 42
lowerCAmelCase_ = 42
lowerCAmelCase_ = 42
def __init__( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = None , __lowerCAmelCase = Split.train , __lowerCAmelCase = None , ):
'''simple docstring'''
warnings.warn(
'''This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets '''
'''library. You can have a look at this example script for pointers: '''
'''https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py''' , __lowerCAmelCase , )
lowerCamelCase__ = args
lowerCamelCase__ = glue_processors[args.task_name]()
lowerCamelCase__ = glue_output_modes[args.task_name]
if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
try:
lowerCamelCase__ = Split[mode]
except KeyError:
raise KeyError('''mode is not a valid split name''' )
# Load data features from cache or dataset file
lowerCamelCase__ = os.path.join(
cache_dir if cache_dir is not None else args.data_dir , F'cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}' , )
lowerCamelCase__ = self.processor.get_labels()
if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
"RobertaTokenizer",
"RobertaTokenizerFast",
"XLMRobertaTokenizer",
"BartTokenizer",
"BartTokenizerFast",
):
# HACK(label indices are swapped in RoBERTa pretrained model)
lowerCamelCase__ , lowerCamelCase__ = label_list[2], label_list[1]
lowerCamelCase__ = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
lowerCamelCase__ = cached_features_file + '''.lock'''
with FileLock(__lowerCAmelCase ):
if os.path.exists(__lowerCAmelCase ) and not args.overwrite_cache:
lowerCamelCase__ = time.time()
lowerCamelCase__ = torch.load(__lowerCAmelCase )
logger.info(
F'Loading features from cached file {cached_features_file} [took %.3f s]' , time.time() - start )
else:
logger.info(F'Creating features from dataset file at {args.data_dir}' )
if mode == Split.dev:
lowerCamelCase__ = self.processor.get_dev_examples(args.data_dir )
elif mode == Split.test:
lowerCamelCase__ = self.processor.get_test_examples(args.data_dir )
else:
lowerCamelCase__ = self.processor.get_train_examples(args.data_dir )
if limit_length is not None:
lowerCamelCase__ = examples[:limit_length]
lowerCamelCase__ = glue_convert_examples_to_features(
__lowerCAmelCase , __lowerCAmelCase , max_length=args.max_seq_length , label_list=__lowerCAmelCase , output_mode=self.output_mode , )
lowerCamelCase__ = time.time()
torch.save(self.features , __lowerCAmelCase )
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
F'Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]' )
def __len__( self ):
'''simple docstring'''
return len(self.features )
def __getitem__( self , __lowerCAmelCase ):
'''simple docstring'''
return self.features[i]
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.label_list
| 29
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_bartpho"] = ["BartphoTokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bartpho import BartphoTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 53
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_gpt_sw3"] = ["GPTSw3Tokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_gpt_sw3 import GPTSw3Tokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 46
| 0
|
from __future__ import annotations
from random import choice
def random_pivot(lst):
    """Choose a random pivot element from the list."""
    return choice(lst)


def kth_number(lst: list[int], k: int) -> int:
    """Return the kth smallest element of lst (1-indexed) via quickselect."""
    pivot = random_pivot(lst)

    # partition based on pivot
    # linear time
    small = [e for e in lst if e < pivot]
    big = [e for e in lst if e > pivot]

    # if we get lucky, pivot might be the element we want.
    # we can easily see this:
    # small (elements smaller than k)
    # + pivot (kth element)
    # + big (elements larger than k)
    if len(small) == k - 1:
        return pivot
    # pivot is in elements bigger than k
    elif len(small) < k - 1:
        return kth_number(big, k - len(small) - 1)
    # pivot is in elements smaller than k
    else:
        return kth_number(small, k)
if __name__ == "__main__":
import doctest
doctest.testmod()
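# Example (illustrative data; distinct values keep the partition unambiguous,
# since elements equal to the pivot are dropped): the 3rd smallest of
# [2, 1, 3, 4, 5] is 3.
assert kth_number([2, 1, 3, 4, 5], 3) == 3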
| 716
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_rembert import RemBertTokenizer
else:
SCREAMING_SNAKE_CASE__ = None
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {'''vocab_file''': '''sentencepiece.model''', '''tokenizer_file''': '''tokenizer.json'''}
SCREAMING_SNAKE_CASE__ = {
'''vocab_file''': {
'''google/rembert''': '''https://huggingface.co/google/rembert/resolve/main/sentencepiece.model''',
},
'''tokenizer_file''': {
'''google/rembert''': '''https://huggingface.co/google/rembert/resolve/main/tokenizer.json''',
},
}
SCREAMING_SNAKE_CASE__ = {
'''google/rembert''': 2_5_6,
}
SCREAMING_SNAKE_CASE__ = '''▁'''
class RemBertTokenizerFast(PreTrainedTokenizerFast ):
    """simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = RemBertTokenizer
    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=True , remove_space=True , keep_accents=False , bos_token="[CLS]" , eos_token="[SEP]" , unk_token="<unk>" , sep_token="[SEP]" , pad_token="<pad>" , cls_token="[CLS]" , mask_token="[MASK]" , **kwargs , ):
        """simple docstring"""
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , remove_space=remove_space , keep_accents=keep_accents , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , **kwargs , )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1 = None ):
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep
    def get_special_tokens_mask( self , token_ids_0 , token_ids_1 = None , already_has_special_tokens = False ):
        """simple docstring"""
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    'You should not supply a second sequence if the provided sequence of '
                    'ids is already formatted with special tokens for the model.' )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0 )) + [1] + ([0] * len(token_ids_1 )) + [1]
        return [1] + ([0] * len(token_ids_0 )) + [1]
    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ):
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
    def save_vocabulary( self , save_directory , filename_prefix = None ):
        """simple docstring"""
        if not os.path.isdir(save_directory ):
            logger.error('Vocabulary path ({}) should be a directory'.format(save_directory ) )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        return (out_vocab_file,)
| 52
| 0
|
"""simple docstring"""
import pytest
import datasets.config
from datasets.utils.info_utils import is_small_dataset
@pytest.mark.parametrize('''dataset_size''', [None, 400 * 2**20, 600 * 2**20] )
@pytest.mark.parametrize('''input_in_memory_max_size''', ['''default''', 0, 100 * 2**20, 900 * 2**20] )
def test_is_small_dataset(dataset_size, input_in_memory_max_size, monkeypatch):
    '''simple docstring'''
    if input_in_memory_max_size != "default":
        monkeypatch.setattr(datasets.config, '''IN_MEMORY_MAX_SIZE''', input_in_memory_max_size )
    in_memory_max_size = datasets.config.IN_MEMORY_MAX_SIZE
    if input_in_memory_max_size == "default":
        assert in_memory_max_size == 0
    else:
        assert in_memory_max_size == input_in_memory_max_size
    if dataset_size and in_memory_max_size:
        expected = dataset_size < in_memory_max_size
    else:
        expected = False
    result = is_small_dataset(dataset_size )
    assert result == expected
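# For reference, the behaviour the test pins down can be sketched as follows
# (a paraphrase of the semantics, not the verbatim library implementation):
def is_small_dataset_sketch(dataset_size, in_memory_max_size):
    # "small" means: a known size that fits under a nonzero in-memory cap.
    if dataset_size and in_memory_max_size:
        return dataset_size < in_memory_max_size
    return False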
| 608
|
"""simple docstring"""
from collections.abc import Callable
import numpy as np
def heun(ode_func: Callable, xa: float, ya: float, x_end: float, step_size: float):
    '''simple docstring'''
    # Heun's method (explicit trapezoidal rule): an Euler predictor followed
    # by a trapezoidal corrector. Names reconstructed from the collapsed source.
    n = int(np.ceil((x_end - xa) / step_size ) )
    y = np.zeros((n + 1,) )
    y[0] = ya
    x = xa
    for k in range(n ):
        y_pred = y[k] + step_size * ode_func(x, y[k] )
        y[k + 1] = y[k] + (
            (step_size / 2) * (ode_func(x, y[k] ) + ode_func(x + step_size, y_pred ))
        )
        x += step_size
    return y
if __name__ == "__main__":
import doctest
doctest.testmod()
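    # Usage sketch for the integrator above on dy/dx = y, y(0) = 1, whose
    # exact solution is e**x. The call signature follows this reconstruction.
    ys = heun(lambda x, y: y, 0.0, 1.0, 1.0, 0.25)
    print(ys[-1])  # ~2.69, approaching e ~ 2.71828 as step_size shrinks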
| 608
| 1
|
import numpy as np
import pandas as pd
from sklearn.preprocessing import Normalizer
from sklearn.svm import SVR
from statsmodels.tsa.statespace.sarimax import SARIMAX
def linear_regression_prediction(train_dt, train_usr, train_mtch, test_dt, test_mtch) -> float:
    '''simple docstring'''
    x = np.array([[1, item, train_mtch[i]] for i, item in enumerate(train_dt )] )
    y = np.array(train_usr )
    beta = np.dot(np.dot(np.linalg.inv(np.dot(x.transpose() , x ) ) , x.transpose() ) , y )
    return abs(beta[0] + test_dt[0] * beta[1] + test_mtch[0] * beta[2] )
def sarimax_predictor(train_user, train_match, test_match) -> float:
    '''simple docstring'''
    order = (1, 2, 1)
    seasonal_order = (1, 1, 0, 7)
    model = SARIMAX(
        train_user , exog=train_match , order=order , seasonal_order=seasonal_order )
    model_fit = model.fit(disp=False , maxiter=600 , method="nm" )
    result = model_fit.predict(1 , len(test_match ) , exog=[test_match] )
    return result[0]
def support_vector_regressor(x_train, x_test, train_user) -> float:
    '''simple docstring'''
    regressor = SVR(kernel="rbf" , C=1 , gamma=0.1 , epsilon=0.1 )
    regressor.fit(x_train , train_user )
    y_pred = regressor.predict(x_test )
    return y_pred[0]
def interquartile_range_checker(train_user) -> float:
    '''simple docstring'''
    train_user.sort()
    q1 = np.percentile(train_user , 25 )
    q3 = np.percentile(train_user , 75 )
    iqr = q3 - q1
    low_lim = q1 - (iqr * 0.1)
    return low_lim
def data_safety_checker(list_vote, actual_result) -> bool:
    '''simple docstring'''
    safe = 0
    not_safe = 0
    for i in list_vote:
        if i > actual_result:
            not_safe = not_safe + 1
        else:
            if abs(abs(i ) - abs(actual_result ) ) <= 0.1:
                safe += 1
            else:
                not_safe += 1
    return safe > not_safe
if __name__ == "__main__":
    # data_input_df = pd.read_csv("ex_data.csv", header=None)
    data_input = [[1_8_2_3_1, 0.0, 1], [2_2_6_2_1, 1.0, 2], [1_5_6_7_5, 0.0, 3], [2_3_5_8_3, 1.0, 4]]
    data_input_df = pd.DataFrame(
        data_input, columns=["""total_user""", """total_even""", """days"""]
    )
    normalize_df = Normalizer().fit_transform(data_input_df.values)
    # split data
    total_date = normalize_df[:, 2].tolist()
    total_user = normalize_df[:, 0].tolist()
    total_match = normalize_df[:, 1].tolist()
    # for svr (input variable = total date and total match)
    x = normalize_df[:, [1, 2]].tolist()
    x_train = x[: len(x) - 1]
    x_test = x[len(x) - 1 :]
    # for linear regression & sarimax
    trn_date = total_date[: len(total_date) - 1]
    trn_user = total_user[: len(total_user) - 1]
    trn_match = total_match[: len(total_match) - 1]
    tst_date = total_date[len(total_date) - 1 :]
    tst_user = total_user[len(total_user) - 1 :]
    tst_match = total_match[len(total_match) - 1 :]
    # voting system with forecasting
    res_vote = [
        linear_regression_prediction(
            trn_date, trn_user, trn_match, tst_date, tst_match
        ),
        sarimax_predictor(trn_user, trn_match, tst_match),
        support_vector_regressor(x_train, x_test, trn_user),
    ]
    # check the safety of today's data
    not_str = """""" if data_safety_checker(res_vote, tst_user) else """not """
    print(f"""Today's data is {not_str}safe.""")
| 414
|
from __future__ import annotations
import time
from math import sqrt
# 1 for manhattan, 0 for euclidean
HEURISTIC = 0
grid = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right
TPosition = tuple[int, int]
class Node:
    """simple docstring"""
    def __init__( self , pos_x , pos_y , goal_x , goal_y , g_cost , parent , ) -> None:
        """simple docstring"""
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.h_cost = self.calculate_heuristic()
        self.f_cost = self.g_cost + self.h_cost
    def calculate_heuristic( self ) -> float:
        """simple docstring"""
        dx = self.pos_x - self.goal_x
        dy = self.pos_y - self.goal_y
        if HEURISTIC == 1:
            return abs(dx ) + abs(dy )
        else:
            return sqrt(dy**2 + dx**2 )
    def __lt__( self , other ) -> bool:
        """simple docstring"""
        return self.f_cost < other.f_cost
class AStar:
    """simple docstring"""
    def __init__( self , start , goal ) -> None:
        """simple docstring"""
        self.start = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , None )
        self.target = Node(goal[1] , goal[0] , goal[1] , goal[0] , 99999 , None )
        self.open_nodes = [self.start]
        self.closed_nodes = []
        self.reached = False
    def search( self ) -> list[TPosition]:
        """simple docstring"""
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0 )
            if current_node.pos == self.target.pos:
                return self.retrace_path(current_node )
            self.closed_nodes.append(current_node )
            successors = self.get_successors(current_node )
            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue
                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node )
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node ) )
                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node )
                    else:
                        self.open_nodes.append(better_node )
        return [self.start.pos]
    def get_successors( self , parent ) -> list[Node]:
        """simple docstring"""
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(grid ) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(
                Node(
                    pos_x , pos_y , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , parent , ) )
        return successors
    def retrace_path( self , node ) -> list[TPosition]:
        """simple docstring"""
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x) )
            current_node = current_node.parent
        path.reverse()
        return path
class BidirectionalAStar:
    """simple docstring"""
    def __init__( self , start , goal ) -> None:
        """simple docstring"""
        self.fwd_astar = AStar(start , goal )
        self.bwd_astar = AStar(goal , start )
        self.reached = False
    def search( self ) -> list[TPosition]:
        """simple docstring"""
        while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
            self.fwd_astar.open_nodes.sort()
            self.bwd_astar.open_nodes.sort()
            current_fwd_node = self.fwd_astar.open_nodes.pop(0 )
            current_bwd_node = self.bwd_astar.open_nodes.pop(0 )
            if current_bwd_node.pos == current_fwd_node.pos:
                return self.retrace_bidirectional_path(
                    current_fwd_node , current_bwd_node )
            self.fwd_astar.closed_nodes.append(current_fwd_node )
            self.bwd_astar.closed_nodes.append(current_bwd_node )
            self.fwd_astar.target = current_bwd_node
            self.bwd_astar.target = current_fwd_node
            successors = {
                self.fwd_astar: self.fwd_astar.get_successors(current_fwd_node ),
                self.bwd_astar: self.bwd_astar.get_successors(current_bwd_node ),
            }
            for astar in [self.fwd_astar, self.bwd_astar]:
                for child_node in successors[astar]:
                    if child_node in astar.closed_nodes:
                        continue
                    if child_node not in astar.open_nodes:
                        astar.open_nodes.append(child_node )
                    else:
                        # retrieve the best current path
                        better_node = astar.open_nodes.pop(
                            astar.open_nodes.index(child_node ) )
                        if child_node.g_cost < better_node.g_cost:
                            astar.open_nodes.append(child_node )
                        else:
                            astar.open_nodes.append(better_node )
        return [self.fwd_astar.start.pos]
    def retrace_bidirectional_path( self , fwd_node , bwd_node ) -> list[TPosition]:
        """simple docstring"""
        fwd_path = self.fwd_astar.retrace_path(fwd_node )
        bwd_path = self.bwd_astar.retrace_path(bwd_node )
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
    start_time = time.time()
    a_star = AStar(init, goal)
    path = a_star.search()
    end_time = time.time() - start_time
    print(F'AStar execution time = {end_time:f} seconds')
    bd_start_time = time.time()
    bidir_astar = BidirectionalAStar(init, goal)
    bd_path = bidir_astar.search()
    bd_end_time = time.time() - bd_start_time
    print(F'BidirectionalAStar execution time = {bd_end_time:f} seconds')
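    # The heuristic is picked by the module-level HEURISTIC flag: 1 for
    # Manhattan, anything else for euclidean. A small probe of the default,
    # using the Node class as reconstructed above:
    probe = Node(0, 0, 6, 6, 0, None)
    print(probe.h_cost)  # sqrt(72) ~ 8.49 with the euclidean heuristic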
| 414
| 1
|
"""simple docstring"""
def circle_sort(collection: list) -> list:
    """simple docstring"""
    if len(collection ) < 2:
        return collection
    def circle_sort_util(collection, low, high) -> bool:
        swapped = False
        if low == high:
            return swapped
        left = low
        right = high
        while left < right:
            if collection[left] > collection[right]:
                collection[left], collection[right] = (
                    collection[right],
                    collection[left],
                )
                swapped = True
            left += 1
            right -= 1
        if left == right and collection[left] > collection[right + 1]:
            collection[left], collection[right + 1] = (
                collection[right + 1],
                collection[left],
            )
            swapped = True
        mid = low + int((high - low) / 2 )
        left_swap = circle_sort_util(collection , low , mid )
        right_swap = circle_sort_util(collection , mid + 1 , high )
        return swapped or left_swap or right_swap
    is_not_sorted = True
    while is_not_sorted is True:
        is_not_sorted = circle_sort_util(collection , 0 , len(collection ) - 1 )
    return collection
if __name__ == "__main__":
SCREAMING_SNAKE_CASE = input("""Enter numbers separated by a comma:\n""").strip()
SCREAMING_SNAKE_CASE = [int(item) for item in user_input.split(""",""")]
print(circle_sort(unsorted))
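    # Quick checks for the reconstructed circle sort (sorts in place and
    # returns the same list):
    assert circle_sort([]) == []
    assert circle_sort([3, 1, 2]) == [1, 2, 3]
    assert circle_sort([5, -1, 4, 0]) == [-1, 0, 4, 5]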
| 554
|
"""simple docstring"""
import argparse
import logging
import os
import datasets
import tensorflow as tf
from transformers import AutoTokenizer
SCREAMING_SNAKE_CASE = logging.getLogger(__name__)
def parse_args():
    """simple docstring"""
    parser = argparse.ArgumentParser(
        description="Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset." )
parser.add_argument(
"--dataset_name" , type=UpperCAmelCase_ , default="wikitext" , help="Name of the training. Explore datasets at: hf.co/datasets." , )
parser.add_argument(
"--dataset_config" , type=UpperCAmelCase_ , default="wikitext-103-raw-v1" , help="Configuration name of the dataset." )
parser.add_argument(
"--tokenizer_name_or_path" , type=UpperCAmelCase_ , default="sayakpaul/unigram-tokenizer-wikitext" , help="Tokenizer identifier. Can be a local filepath or a Hub identifier." , )
parser.add_argument(
"--shard_size" , type=UpperCAmelCase_ , default=10_00 , help="Number of entries to go in a single shard." , )
parser.add_argument("--split" , type=UpperCAmelCase_ , default="train" , choices=["train", "test", "validation"] )
parser.add_argument(
"--limit" , default=UpperCAmelCase_ , type=UpperCAmelCase_ , help="Limit the number of shards (used for debugging)." , )
parser.add_argument(
"--max_length" , type=UpperCAmelCase_ , default=5_12 , help="Maximum sequence length. For training on TPUs, it helps to have a maximum"
" sequence length that is a multiple of 8." , )
parser.add_argument(
"--output_dir" , default="tf-tpu" , type=UpperCAmelCase_ , help="Output directory where the TFRecord shards will be saved. If the"
" path is appended with `gs://` ('gs://tf-tpu', for example) then the TFRecord"
" shards will be directly saved to a Google Cloud Storage bucket." , )
    args = parser.parse_args()
return args
def tokenize_function(tokenizer):
    """simple docstring"""
    def fn(examples):
        return tokenizer(examples["text"] )
    return fn
def get_serialized_examples(tokenized_data):
    """simple docstring"""
    records = []
    for i in range(len(tokenized_data["input_ids"] ) ):
        features = {
            "input_ids": tf.train.Feature(int64_list=tf.train.Int64List(value=tokenized_data["input_ids"][i] ) ),
            "attention_mask": tf.train.Feature(
                int64_list=tf.train.Int64List(value=tokenized_data["attention_mask"][i] ) ),
        }
        features = tf.train.Features(feature=features )
        example = tf.train.Example(features=features )
        record_bytes = example.SerializeToString()
        records.append(record_bytes )
    return records
def main(args):
    """simple docstring"""
    dataset = datasets.load_dataset(args.dataset_name , args.dataset_config , split=args.split )
    if args.limit is not None:
        max_samples = min(len(dataset ) , args.limit )
        dataset = dataset.select(range(max_samples ) )
        print(F"Limiting the dataset to {args.limit} entries." )
    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path )
    # Handle output directory creation.
    # For serializing into a Google Cloud Storage Bucket, one needs to first
    # create a bucket.
    if "gs" not in args.output_dir:
        if not os.path.exists(args.output_dir ):
            os.makedirs(args.output_dir )
        split_dir = os.path.join(args.output_dir , args.split )
        if not os.path.exists(split_dir ):
            os.makedirs(split_dir )
    else:
        split_dir = os.path.join(args.output_dir , args.split )
    # Tokenize the whole dataset at once.
    tokenize_fn = tokenize_function(tokenizer )
    dataset_tokenized = dataset.map(tokenize_fn , batched=True , num_proc=4 , remove_columns=["text"] )
    # We need to concatenate all our texts together, and then split the result
    # into chunks of a fixed size, which we will call block_size. To do this, we
    # will use the map method again, with the option batched=True. When we use batched=True,
    # the function we pass to map() will be passed multiple inputs at once, allowing us
    # to group them into more or fewer examples than we had in the input.
    # This allows us to create our new fixed-length samples. The advantage of this
    # method is that we don't lose a whole lot of content from the dataset compared to the
    # case where we simply tokenize with a pre-defined max_length.
    def group_texts(examples ):
        # Concatenate all texts.
        concatenated_examples = {k: sum(examples[k] , [] ) for k in examples.keys()}
        total_length = len(concatenated_examples[list(examples.keys() )[0]] )
        # We drop the small remainder, though you could add padding instead if the model supports it
        # In this, as in all things, we advise you to follow your heart 🫀
        total_length = (total_length // args.max_length) * args.max_length
        # Split by chunks of max_len.
        result = {
            k: [t[i : i + args.max_length] for i in range(0 , total_length , args.max_length )]
            for k, t in concatenated_examples.items()
        }
        return result
    grouped_dataset = dataset_tokenized.map(group_texts , batched=True , batch_size=10_00 , num_proc=4 )
    shard_count = 0
    total_records = 0
    for shard in range(0 , len(grouped_dataset ) , args.shard_size ):
        dataset_snapshot = grouped_dataset[shard : shard + args.shard_size]
        records_containing = len(dataset_snapshot["input_ids"] )
        filename = os.path.join(split_dir , F"dataset-{shard_count}-{records_containing}.tfrecord" )
        serialized_examples = get_serialized_examples(dataset_snapshot )
        with tf.io.TFRecordWriter(filename ) as out_file:
            for i in range(len(serialized_examples ) ):
                example = serialized_examples[i]
                out_file.write(example )
            print("Wrote file {} containing {} records".format(filename , records_containing ) )
        shard_count += 1
        total_records += records_containing
    with open(F"split-{args.split}-records-count.txt" , "w" ) as f:
        print(F"Total {args.split} records: {total_records}" , file=f )
if __name__ == "__main__":
    args = parse_args()
main(args)
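    # Sketch of reading a shard back with the standard tf.data API. The
    # feature spec mirrors the fixed-length chunks written above; the file
    # name below is hypothetical.
    feature_spec = {
        "input_ids": tf.io.FixedLenFeature([args.max_length], tf.int64),
        "attention_mask": tf.io.FixedLenFeature([args.max_length], tf.int64),
    }
    ds = tf.data.TFRecordDataset(["tf-tpu/train/dataset-0-1000.tfrecord"])
    ds = ds.map(lambda record: tf.io.parse_single_example(record, feature_spec))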
| 554
| 1
|
'''simple docstring'''
import warnings
from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum
from ...processing_utils import ProcessorMixin
if is_torch_available():
import torch
class DecodeType(ExplicitEnum ):
    '''simple docstring'''
    CHARACTER = '''char'''
    BPE = '''bpe'''
    WORDPIECE = '''wp'''
SUPPORTED_ANNOTATION_FORMATS = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)
class MgpstrProcessor(ProcessorMixin ):
    '''simple docstring'''
    attributes = ['''image_processor''', '''char_tokenizer''']
    image_processor_class = '''ViTImageProcessor'''
    char_tokenizer_class = '''MgpstrTokenizer'''
    def __init__( self , image_processor=None , tokenizer=None , **kwargs ):
        '''simple docstring'''
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
                ' instead.' , FutureWarning , )
            feature_extractor = kwargs.pop('feature_extractor' )
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('You need to specify an `image_processor`.' )
        if tokenizer is None:
            raise ValueError('You need to specify a `tokenizer`.' )
        self.char_tokenizer = tokenizer
        self.bpe_tokenizer = AutoTokenizer.from_pretrained('gpt2' )
        self.wp_tokenizer = AutoTokenizer.from_pretrained('bert-base-uncased' )
        super().__init__(image_processor , tokenizer )
    def __call__( self , text=None , images=None , return_tensors=None , **kwargs ):
        '''simple docstring'''
        if images is None and text is None:
            raise ValueError('You need to specify either an `images` or `text` input to process.' )
        if images is not None:
            inputs = self.image_processor(images , return_tensors=return_tensors , **kwargs )
        if text is not None:
            encodings = self.char_tokenizer(text , return_tensors=return_tensors , **kwargs )
        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            inputs['labels'] = encodings['input_ids']
            return inputs
    def batch_decode( self , sequences ):
        '''simple docstring'''
        char_preds, bpe_preds, wp_preds = sequences
        batch_size = char_preds.size(0 )
        char_strs, char_scores = self._decode_helper(char_preds , 'char' )
        bpe_strs, bpe_scores = self._decode_helper(bpe_preds , 'bpe' )
        wp_strs, wp_scores = self._decode_helper(wp_preds , 'wp' )
        final_strs = []
        final_scores = []
        for i in range(batch_size ):
            scores = [char_scores[i], bpe_scores[i], wp_scores[i]]
            strs = [char_strs[i], bpe_strs[i], wp_strs[i]]
            max_score_index = scores.index(max(scores ) )
            final_strs.append(strs[max_score_index] )
            final_scores.append(scores[max_score_index] )
        out = {}
        out['generated_text'] = final_strs
        out['scores'] = final_scores
        out['char_preds'] = char_strs
        out['bpe_preds'] = bpe_strs
        out['wp_preds'] = wp_strs
        return out
    def _decode_helper( self , pred_logits , format ):
        '''simple docstring'''
        if format == DecodeType.CHARACTER:
            decoder = self.char_decode
            eos_token = 1
            eos_str = '[s]'
        elif format == DecodeType.BPE:
            decoder = self.bpe_decode
            eos_token = 2
            eos_str = '#'
        elif format == DecodeType.WORDPIECE:
            decoder = self.wp_decode
            eos_token = 1_0_2
            eos_str = '[SEP]'
        else:
            raise ValueError(F'''Format {format} is not supported.''' )
        dec_strs, conf_scores = [], []
        batch_size = pred_logits.size(0 )
        batch_max_length = pred_logits.size(1 )
        _, preds_index = pred_logits.topk(1 , dim=-1 , largest=True , sorted=True )
        preds_index = preds_index.view(-1 , batch_max_length )[:, 1:]
        preds_str = decoder(preds_index )
        preds_max_prob, _ = torch.nn.functional.softmax(pred_logits , dim=2 ).max(dim=2 )
        preds_max_prob = preds_max_prob[:, 1:]
        for index in range(batch_size ):
            pred_eos = preds_str[index].find(eos_str )
            pred = preds_str[index][:pred_eos]
            pred_index = preds_index[index].cpu().tolist()
            pred_eos_index = pred_index.index(eos_token ) if eos_token in pred_index else -1
            pred_max_prob = preds_max_prob[index][: pred_eos_index + 1]
            confidence_score = pred_max_prob.cumprod(dim=0 )[-1] if pred_max_prob.nelement() != 0 else 0.0
            dec_strs.append(pred )
            conf_scores.append(confidence_score )
        return dec_strs, conf_scores
    def char_decode( self , sequences ):
        '''simple docstring'''
        decode_strs = [seq.replace(' ' , '' ) for seq in self.char_tokenizer.batch_decode(sequences )]
        return decode_strs
    def bpe_decode( self , sequences ):
        '''simple docstring'''
        return self.bpe_tokenizer.batch_decode(sequences )
    def wp_decode( self , sequences ):
        '''simple docstring'''
        decode_strs = [seq.replace(' ' , '' ) for seq in self.wp_tokenizer.batch_decode(sequences )]
        return decode_strs
| 389
|
'''simple docstring'''
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class BlipProcessor(ProcessorMixin ):
    '''simple docstring'''
    attributes = ['''image_processor''', '''tokenizer''']
    image_processor_class = '''BlipImageProcessor'''
    tokenizer_class = ('''BertTokenizer''', '''BertTokenizerFast''')
    def __init__( self , image_processor , tokenizer ):
        '''simple docstring'''
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor , tokenizer )
        self.current_processor = self.image_processor
    def __call__( self , images = None , text = None , add_special_tokens = True , padding = False , truncation = None , max_length = None , stride = 0 , pad_to_multiple_of = None , return_attention_mask = None , return_overflowing_tokens = False , return_special_tokens_mask = False , return_offsets_mapping = False , return_token_type_ids = False , return_length = False , verbose = True , return_tensors = None , **kwargs , ):
        '''simple docstring'''
        if images is None and text is None:
            raise ValueError('You have to specify either images or text.' )
        # Get only text
        if images is None:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_token_type_ids=return_token_type_ids , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
            return text_encoding
        # add pixel_values
        encoding_image_processor = self.image_processor(images , return_tensors=return_tensors )
        if text is not None:
            text_encoding = self.tokenizer(
                text=text , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_token_type_ids=return_token_type_ids , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
        else:
            text_encoding = None
        if text_encoding is not None:
            encoding_image_processor.update(text_encoding )
        return encoding_image_processor
    def batch_decode( self , *args , **kwargs ):
        '''simple docstring'''
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode( self , *args , **kwargs ):
        '''simple docstring'''
        return self.tokenizer.decode(*args , **kwargs )
    @property
    def model_input_names( self ):
        '''simple docstring'''
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 389
| 1
|
from __future__ import annotations
import numpy as np
from numpy import float64
from numpy.typing import NDArray
def jacobi_iteration_method(coefficient_matrix, constant_matrix, init_val, iterations) -> list[float]:
    rows1, cols1 = coefficient_matrix.shape
    rows2, cols2 = constant_matrix.shape
    if rows1 != cols1:
        msg = F"""Coefficient matrix dimensions must be nxn but received {rows1}x{cols1}"""
        raise ValueError(msg )
    if cols2 != 1:
        msg = F"""Constant matrix must be nx1 but received {rows2}x{cols2}"""
        raise ValueError(msg )
    if rows1 != rows2:
        msg = (
            "Coefficient and constant matrices dimensions must be nxn and nx1 but "
            F"""received {rows1}x{cols1} and {rows2}x{cols2}"""
        )
        raise ValueError(msg )
    if len(init_val ) != rows1:
        msg = (
            "Number of initial values must be equal to number of rows in coefficient "
            F"""matrix but received {len(init_val )} and {rows1}"""
        )
        raise ValueError(msg )
    if iterations <= 0:
        raise ValueError("Iterations must be at least 1" )
    table: NDArray[float64] = np.concatenate(
        (coefficient_matrix, constant_matrix) ,axis=1 )
    rows, cols = table.shape
    strictly_diagonally_dominant(table )
    # Iterates the whole matrix for given number of times
    for _ in range(iterations ):
        new_val = []
        for row in range(rows ):
            temp = 0
            for col in range(cols ):
                if col == row:
                    denom = table[row][col]
                elif col == cols - 1:
                    val = table[row][col]
                else:
                    temp += (-1) * table[row][col] * init_val[col]
            new_val.append((temp + val) / denom )
        init_val = new_val
    return [float(i ) for i in new_val]
def strictly_diagonally_dominant(table) -> bool:
    rows, cols = table.shape
    is_diagonally_dominant = True
    for i in range(0 ,rows ):
        total = 0
        for j in range(0 ,cols - 1 ):
            if i == j:
                continue
            else:
                total += table[i][j]
        if table[i][i] <= total:
            raise ValueError("Coefficient matrix is not strictly diagonally dominant" )
    return is_diagonally_dominant
# Test Cases
if __name__ == "__main__":
import doctest
doctest.testmod()
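    # Usage sketch for the solver above on a strictly diagonally dominant
    # 2x2 system: 4x + y = 1, x + 3y = 2, exact solution (1/11, 7/11).
    coefficient = np.array([[4.0, 1.0], [1.0, 3.0]])
    constant = np.array([[1.0], [2.0]])
    print(jacobi_iteration_method(coefficient, constant, [0.0, 0.0], 25))
    # -> approximately [0.0909, 0.6364]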
| 397
|
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'''configuration_cpmant''': ['''CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''CpmAntConfig'''],
'''tokenization_cpmant''': ['''CpmAntTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_cpmant'''] = [
'''CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CpmAntForCausalLM''',
'''CpmAntModel''',
'''CpmAntPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
from .tokenization_cpmant import CpmAntTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_cpmant import (
CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
CpmAntForCausalLM,
CpmAntModel,
CpmAntPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 397
| 1
|
from __future__ import annotations
import math
import numpy as np
from numpy.linalg import norm
def euclidean(input_a: np.ndarray, input_b: np.ndarray) -> float:
    """simple docstring"""
    return math.sqrt(sum(pow(a - b , 2 ) for a, b in zip(input_a , input_b ) ) )
def similarity_search(dataset: np.ndarray, value_array: np.ndarray) -> list[list[list[float] | float]]:
    """simple docstring"""
    if dataset.ndim != value_array.ndim:
        msg = (
            'Wrong input data\'s dimensions... '
            f'dataset : {dataset.ndim}, value_array : {value_array.ndim}'
        )
        raise ValueError(msg )
    try:
        if dataset.shape[1] != value_array.shape[1]:
            msg = (
                'Wrong input data\'s shape... '
                f'dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}'
            )
            raise ValueError(msg )
    except IndexError:
        if dataset.ndim != value_array.ndim:
            raise TypeError('Wrong shape' )
    if dataset.dtype != value_array.dtype:
        msg = (
            'Input data have different datatype... '
            f'dataset : {dataset.dtype}, value_array : {value_array.dtype}'
        )
        raise TypeError(msg )
    answer = []
    for value in value_array:
        dist = euclidean(value , dataset[0] )
        vector = dataset[0].tolist()
        for dataset_value in dataset[1:]:
            temp_dist = euclidean(value , dataset_value )
            if dist > temp_dist:
                dist = temp_dist
                vector = dataset_value.tolist()
        answer.append([vector, dist] )
    return answer
def cosine_similarity(input_a: np.ndarray, input_b: np.ndarray) -> float:
    """simple docstring"""
    return np.dot(input_a , input_b ) / (norm(input_a ) * norm(input_b ))
if __name__ == "__main__":
import doctest
doctest.testmod()
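    # Usage sketch: each query vector is matched to its nearest dataset row.
    demo_dataset = np.array([[0.0, 0.0], [1.0, 1.0], [2.0, 2.0]])
    demo_queries = np.array([[0.1, 0.1]])
    print(similarity_search(demo_dataset, demo_queries))
    # -> [[[0.0, 0.0], 0.1414...]]  (nearest vector and its euclidean distance)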
| 717
|
def solution(pence: int = 200 ) -> int:
    """simple docstring"""
    coins = [1, 2, 5, 10, 20, 50, 100, 200]
    number_of_ways = [0] * (pence + 1)
    number_of_ways[0] = 1  # base case: 1 way to make 0 pence
    for coin in coins:
        for i in range(coin , pence + 1 , 1 ):
            number_of_ways[i] += number_of_ways[i - coin]
    return number_of_ways[pence]
if __name__ == "__main__":
assert solution(2_0_0) == 7_3_6_8_2
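    # Because the coin loop is outermost, the DP counts combinations rather
    # than orderings. Smaller worked case: 5 pence has 4 ways
    # (5, 2+2+1, 2+1+1+1, 1+1+1+1+1).
    assert solution(5) == 4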
| 604
| 0
|
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
def squared_euclidean_distance(a, b):
    b = b.T
    a2 = np.sum(np.square(a ) , axis=1 )
    b2 = np.sum(np.square(b ) , axis=0 )
    ab = np.matmul(a , b )
    d = a2[:, None] - 2 * ab + b2[None, :]
    return d
def color_quantize(x, clusters):
    x = x.reshape(-1 , 3 )
    d = squared_euclidean_distance(x , clusters )
    return np.argmin(d , axis=1 )
class ImageGPTImageProcessor(BaseImageProcessor ):
    model_input_names = ['pixel_values']
    def __init__( self , clusters = None , do_resize = True , size = None , resample = PILImageResampling.BILINEAR , do_normalize = True , do_color_quantize = True , **kwargs , ):
        '''simple docstring'''
        super().__init__(**kwargs )
        size = size if size is not None else {'''height''': 256, '''width''': 256}
        size = get_size_dict(size )
        self.clusters = np.array(clusters ) if clusters is not None else None
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_normalize = do_normalize
        self.do_color_quantize = do_color_quantize
    def resize( self , image , size , resample = PILImageResampling.BILINEAR , data_format = None , **kwargs , ):
        '''simple docstring'''
        size = get_size_dict(size )
        if "height" not in size or "width" not in size:
            raise ValueError(f"""Size dictionary must contain both height and width keys. Got {size.keys()}""" )
        return resize(
            image , size=(size['''height'''], size['''width''']) , resample=resample , data_format=data_format , **kwargs )
    def normalize( self , image , data_format = None , ):
        '''simple docstring'''
        image = rescale(image=image , scale=1 / 127.5 , data_format=data_format )
        image = image - 1
        return image
    def preprocess( self , images , do_resize = None , size = None , resample = None , do_normalize = None , do_color_quantize = None , clusters = None , return_tensors = None , data_format = ChannelDimension.FIRST , **kwargs , ):
        '''simple docstring'''
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size )
        resample = resample if resample is not None else self.resample
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_color_quantize = do_color_quantize if do_color_quantize is not None else self.do_color_quantize
        clusters = clusters if clusters is not None else self.clusters
        clusters = np.array(clusters )
        images = make_list_of_images(images )
        if not valid_images(images ):
            raise ValueError(
                '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
                '''torch.Tensor, tf.Tensor or jax.ndarray.''' )
        if do_resize and size is None or resample is None:
            raise ValueError('''Size and resample must be specified if do_resize is True.''' )
        if do_color_quantize and clusters is None:
            raise ValueError('''Clusters must be specified if do_color_quantize is True.''' )
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]
        if do_resize:
            images = [self.resize(image=image , size=size , resample=resample ) for image in images]
        if do_normalize:
            images = [self.normalize(image=image ) for image in images]
        if do_color_quantize:
            images = [to_channel_dimension_format(image , ChannelDimension.LAST ) for image in images]
            # color quantize from (batch_size, height, width, 3) to (batch_size, height, width)
            images = np.array(images )
            images = color_quantize(images , clusters ).reshape(images.shape[:-1] )
            # flatten to (batch_size, height*width)
            batch_size = images.shape[0]
            images = images.reshape(batch_size , -1 )
            # We need to convert back to a list of images to keep consistent behaviour across processors.
            images = list(images )
        else:
            images = [to_channel_dimension_format(image , data_format ) for image in images]
        data = {'''input_ids''': images}
        return BatchFeature(data=data , tensor_type=return_tensors )
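# Usage sketch for the palette mapping above: every RGB pixel is mapped to
# the index of its nearest cluster. The 4-colour palette here is made up.
if __name__ == "__main__":
    palette = np.array([[0, 0, 0], [255, 0, 0], [0, 255, 0], [0, 0, 255]], dtype=float)
    pixels = np.array([[250.0, 5.0, 5.0], [1.0, 2.0, 3.0]])
    print(color_quantize(pixels, palette))  # -> [1 0]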
| 248
|
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
logger = get_logger()
DEVICE_MAPPING = None
class JaxFormatter(TensorFormatter[Mapping, 'jax.Array', Mapping] ):
    def __init__( self , features=None , device=None , **jnp_array_kwargs ):
        '''simple docstring'''
        super().__init__(features=features )
        import jax
        from jaxlib.xla_client import Device
        if isinstance(device , Device ):
            raise ValueError(
                F'''Expected {device} to be a `str` not {type(device )}, as `jaxlib.xla_extension.Device` '''
                """is not serializable neither with `pickle` nor with `dill`. Instead you can surround """
                """the device with `str()` to get its string identifier that will be internally mapped """
                """to the actual `jaxlib.xla_extension.Device`.""" )
        self.device = device if isinstance(device , str ) else str(jax.devices()[0] )
        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()
        if self.device not in list(DEVICE_MAPPING.keys() ):
            logger.warning(
                F'''Device with string identifier {self.device} not listed among the available '''
                F'''devices: {list(DEVICE_MAPPING.keys() )}, so falling back to the default '''
                F'''device: {str(jax.devices()[0] )}.''' )
            self.device = str(jax.devices()[0] )
        self.jnp_array_kwargs = jnp_array_kwargs
    @staticmethod
    def _map_devices_to_str( ):
        '''simple docstring'''
        import jax
        return {str(device ): device for device in jax.devices()}
    def _consolidate( self , column ):
        '''simple docstring'''
        import jax
        import jax.numpy as jnp
        if isinstance(column , list ) and column:
            if all(
                isinstance(x , jax.Array ) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column ):
                return jnp.stack(column , axis=0 )
        return column
    def _tensorize( self , value ):
        '''simple docstring'''
        import jax
        import jax.numpy as jnp
        if isinstance(value , (str, bytes, type(None)) ):
            return value
        elif isinstance(value , (np.character, np.ndarray) ) and np.issubdtype(value.dtype , np.character ):
            return value.tolist()
        default_dtype = {}
        if isinstance(value , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.integer ):
            # the default int precision depends on the jax config
            # see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
            if jax.config.jax_enable_x64:
                default_dtype = {"""dtype""": jnp.int64}
            else:
                default_dtype = {"""dtype""": jnp.int32}
        elif isinstance(value , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.floating ):
            default_dtype = {"""dtype""": jnp.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image
            if isinstance(value , PIL.Image.Image ):
                value = np.asarray(value )
        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()
        with jax.default_device(DEVICE_MAPPING[self.device] ):
            # calling jnp.array on a np.ndarray does copy the data
            # see https://github.com/google/jax/issues/4486
            return jnp.array(value , **{**default_dtype, **self.jnp_array_kwargs} )
    def _recursive_tensorize( self , data_struct ):
        '''simple docstring'''
        import jax
        # support for torch, tf, jax etc.
        if config.TORCH_AVAILABLE and "torch" in sys.modules:
            import torch
            if isinstance(data_struct , torch.Tensor ):
                return self._tensorize(data_struct.detach().cpu().numpy()[()] )
        if hasattr(data_struct , """__array__""" ) and not isinstance(data_struct , jax.Array ):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct , np.ndarray ):
            if data_struct.dtype == object:  # jax arrays cannot be instantied from an array of objects
                return self._consolidate([self._recursive_tensorize(substruct ) for substruct in data_struct] )
        elif isinstance(data_struct , (list, tuple) ):
            return self._consolidate([self._recursive_tensorize(substruct ) for substruct in data_struct] )
        return self._tensorize(data_struct )
    def recursive_tensorize( self , data_struct ):
        '''simple docstring'''
        return map_nested(self._recursive_tensorize , data_struct , map_list=False )
    def format_row( self , pa_table ):
        '''simple docstring'''
        row = self.numpy_arrow_extractor().extract_row(pa_table )
        row = self.python_features_decoder.decode_row(row )
        return self.recursive_tensorize(row )
    def format_column( self , pa_table ):
        '''simple docstring'''
        column = self.numpy_arrow_extractor().extract_column(pa_table )
        column = self.python_features_decoder.decode_column(column , pa_table.column_names[0] )
        column = self.recursive_tensorize(column )
        column = self._consolidate(column )
        return column
    def format_batch( self , pa_table ):
        '''simple docstring'''
        batch = self.numpy_arrow_extractor().extract_batch(pa_table )
        batch = self.python_features_decoder.decode_batch(batch )
        batch = self.recursive_tensorize(batch )
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name] )
        return batch
| 412
| 0
|
def multiplicative_persistence(num: int) -> int:
    """simple docstring"""
    if not isinstance(num , int):
        raise ValueError("""multiplicative_persistence() only accepts integral values""")
    if num < 0:
        raise ValueError("""multiplicative_persistence() does not accept negative values""")
    steps = 0
    num_string = str(num)
    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]
        total = 1
        for i in range(0 , len(numbers)):
            total *= numbers[i]
        num_string = str(total)
        steps += 1
    return steps
def additive_persistence(num: int) -> int:
    """simple docstring"""
    if not isinstance(num , int):
        raise ValueError("""additive_persistence() only accepts integral values""")
    if num < 0:
        raise ValueError("""additive_persistence() does not accept negative values""")
    steps = 0
    num_string = str(num)
    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]
        total = 0
        for i in range(0 , len(numbers)):
            total += numbers[i]
        num_string = str(total)
        steps += 1
    return steps
if __name__ == "__main__":
import doctest
doctest.testmod()
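    # Worked examples for the fixed functions above:
    assert multiplicative_persistence(217) == 2  # 217 -> 2*1*7 = 14 -> 1*4 = 4
    assert additive_persistence(199) == 3        # 199 -> 19 -> 10 -> 1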
| 721
|
from typing import List, Optional, Union
import numpy as np
import tensorflow as tf
from .utils import logging
logger = logging.get_logger(__name__)
def shape_list(tensor):
    """simple docstring"""
    if isinstance(tensor , np.ndarray):
        return list(tensor.shape)
    dynamic = tf.shape(tensor)
    if tensor.shape == tf.TensorShape(None):
        return dynamic
    static = tensor.shape.as_list()
    return [dynamic[i] if s is None else s for i, s in enumerate(static)]
def stable_softmax(logits, axis = None , name = None):
    """simple docstring"""
    return tf.nn.softmax(logits=logits + 1e-9 , axis=axis , name=name)
def functional_layernorm(inputs, weight, bias, epsilon=1e-5, axis=-1):
    """simple docstring"""
    if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(axis , int):
        raise NotImplementedError("""Only 1D weight and bias tensors are supported for now, with only a single axis.""")
    # Get mean and variance on the axis to be normalized
    mean, variance = tf.nn.moments(inputs , axes=[axis] , keepdims=True)
    if axis != -1:
        # Reshape scale and weight to have the same rank as inputs, but with 1 dimensions
        # on every dimension except axis
        shape = [1] * inputs.shape.rank
        shape[axis] = shape_list(inputs)[axis]
        weight = tf.reshape(weight , shape)
        bias = tf.reshape(bias , shape)
    # Compute layer normalization using the batch_normalization
    # function.
    outputs = tf.nn.batch_normalization(
        inputs , mean , variance , offset=bias , scale=weight , variance_epsilon=epsilon , )
    return outputs
def flatten(input, start_dim=0, end_dim=-1):
    """simple docstring"""
    if end_dim < 0:
        end_dim += input.shape.rank
    if start_dim < 0:
        start_dim += input.shape.rank
    if start_dim == end_dim:
        return input
    in_shape = tf.shape(input)
    flattened_dim = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1])
    out_shape = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]] , axis=0)
    return tf.reshape(input , out_shape)
def invert_attention_mask(encoder_attention_mask):
    """simple docstring"""
    if not isinstance(encoder_attention_mask , tf.Tensor):
        encoder_attention_mask = tf.convert_to_tensor(encoder_attention_mask)  # Catches stray NumPy inputs
    if encoder_attention_mask.shape.rank == 3:
        encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
    if encoder_attention_mask.shape.rank == 2:
        encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
    # T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
    # Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow
    # /transformer/transformer_layers.py#L270
    # encoder_extended_attention_mask = (encoder_extended_attention_mask ==
    # encoder_extended_attention_mask.transpose(-1, -2))
    encoder_extended_attention_mask = (
        tf.cast(1 , encoder_attention_mask.dtype) - encoder_extended_attention_mask
    ) * encoder_extended_attention_mask.dtype.min
    return encoder_extended_attention_mask
def check_embeddings_within_bounds(tensor, embed_dim, tensor_name = "input_ids"):
    """simple docstring"""
    tf.debugging.assert_less(
        tensor , tf.cast(embed_dim , dtype=tensor.dtype) , message=(
            F'The maximum value of {tensor_name} ({tf.math.reduce_max(tensor)}) must be smaller than the embedding '
            F'layer\'s input dimension ({embed_dim}). The likely cause is some problem at tokenization time.'
        ) , )
def save_attributes_to_hdf5_group(group, name, data):
    """simple docstring"""
    HDF5_OBJECT_HEADER_LIMIT = 64_512
    # Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT`
    # because in that case even chunking the array would not make the saving
    # possible.
    bad_attributes = [x for x in data if len(x) > HDF5_OBJECT_HEADER_LIMIT]
    # Expecting this to never be true.
    if bad_attributes:
        raise RuntimeError(
            """The following attributes cannot be saved to HDF5 file because """
            F'they are larger than {HDF5_OBJECT_HEADER_LIMIT} '
            F'bytes: {bad_attributes}')
    data_npy = np.asarray(data)
    num_chunks = 1
    chunked_data = np.array_split(data_npy , num_chunks)
    # This will never loop forever thanks to the test above.
    while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data):
        num_chunks += 1
        chunked_data = np.array_split(data_npy , num_chunks)
    if num_chunks > 1:
        for chunk_id, chunk_data in enumerate(chunked_data):
            group.attrs["%s%d" % (name, chunk_id)] = chunk_data
    else:
        group.attrs[name] = data
def load_attributes_from_hdf5_group(group, name):
    """simple docstring"""
    if name in group.attrs:
        data = [n.decode("""utf8""") if hasattr(n , """decode""") else n for n in group.attrs[name]]
    else:
        data = []
        chunk_id = 0
        while "%s%d" % (name, chunk_id) in group.attrs:
            data.extend(
                [n.decode("""utf8""") if hasattr(n , """decode""") else n for n in group.attrs["""%s%d""" % (name, chunk_id)]])
            chunk_id += 1
    return data
def expand_1d(data):
    """simple docstring"""
    def _expand_single_1d_tensor(t):
        if isinstance(t , tf.Tensor) and t.shape.rank == 1:
            return tf.expand_dims(t , axis=-1)
        return t
    return tf.nest.map_structure(_expand_single_1d_tensor , data)
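# Why shape_list is useful: it returns static Python ints where a dimension
# is known and scalar tensors where it is dynamic, so graph code can mix both.
# Minimal sketch (the batch dimension below is unknown at trace time):
if __name__ == "__main__":
    @tf.function(input_signature=[tf.TensorSpec([None, 16], tf.float32)])
    def fold(t):
        batch, width = shape_list(t)  # batch: scalar tf.Tensor, width: int 16
        return tf.reshape(t, [batch * width])
    print(fold(tf.zeros((2, 16))).shape)  # (32,)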
| 127
| 0
|
from dataclasses import dataclass
from typing import Optional, Tuple
import torch
from torch import nn
from transformers import RobertaPreTrainedModel, XLMRobertaConfig, XLMRobertaModel
from transformers.utils import ModelOutput
@dataclass
class TransformationModelOutput(ModelOutput ):
    '''simple docstring'''
    projection_state: Optional[torch.FloatTensor] = None
    last_hidden_state: torch.FloatTensor = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None
class RobertaSeriesConfig(XLMRobertaConfig ):
    '''simple docstring'''
    def __init__( self , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , project_dim=512 , pooler_fn="cls" , learn_encoder=False , use_attention_mask=True , **kwargs , ) ->None:
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.project_dim = project_dim
        self.pooler_fn = pooler_fn
        self.learn_encoder = learn_encoder
        self.use_attention_mask = use_attention_mask
class RobertaSeriesModelWithTransformation(RobertaPreTrainedModel ):
    '''simple docstring'''
    _keys_to_ignore_on_load_unexpected = [r'''pooler''', r'''logit_scale''']
    _keys_to_ignore_on_load_missing = [r'''position_ids''', r'''predictions.decoder.bias''']
    base_model_prefix = '''roberta'''
    config_class = RobertaSeriesConfig
    def __init__( self , config ) ->None:
        super().__init__(config )
        self.roberta = XLMRobertaModel(config )
        self.transformation = nn.Linear(config.hidden_size , config.project_dim )
        self.has_pre_transformation = getattr(config , """has_pre_transformation""" , False )
        if self.has_pre_transformation:
            self.transformation_pre = nn.Linear(config.hidden_size , config.project_dim )
            self.pre_LN = nn.LayerNorm(config.hidden_size , eps=config.layer_norm_eps )
        self.post_init()
def _lowerCAmelCase ( self : List[Any] , __a : Optional[torch.Tensor] = None , __a : Optional[torch.Tensor] = None , __a : Optional[torch.Tensor] = None , __a : Optional[torch.Tensor] = None , __a : Optional[torch.Tensor] = None , __a : Optional[torch.Tensor] = None , __a : Optional[torch.Tensor] = None , __a : Optional[torch.Tensor] = None , __a : Optional[bool] = None , __a : Optional[bool] = None , __a : Optional[bool] = None , ) ->Optional[int]:
lowerCamelCase_ : Union[str, Any] = return_dict if return_dict is not None else self.config.use_return_dict
lowerCamelCase_ : Tuple = self.base_model(
input_ids=_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE , position_ids=_SCREAMING_SNAKE_CASE , head_mask=_SCREAMING_SNAKE_CASE , inputs_embeds=_SCREAMING_SNAKE_CASE , encoder_hidden_states=_SCREAMING_SNAKE_CASE , encoder_attention_mask=_SCREAMING_SNAKE_CASE , output_attentions=_SCREAMING_SNAKE_CASE , output_hidden_states=True if self.has_pre_transformation else output_hidden_states , return_dict=_SCREAMING_SNAKE_CASE , )
if self.has_pre_transformation:
lowerCamelCase_ : List[str] = outputs['hidden_states'][-2]
lowerCamelCase_ : str = self.pre_LN(_SCREAMING_SNAKE_CASE )
lowerCamelCase_ : List[str] = self.transformation_pre(_SCREAMING_SNAKE_CASE )
return TransformationModelOutput(
projection_state=_SCREAMING_SNAKE_CASE , last_hidden_state=outputs.last_hidden_state , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
else:
lowerCamelCase_ : Optional[Any] = self.transformation(outputs.last_hidden_state )
return TransformationModelOutput(
projection_state=_SCREAMING_SNAKE_CASE , last_hidden_state=outputs.last_hidden_state , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
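
# A hypothetical smoke test for the classes above; the tiny config values are
# placeholders chosen so the model builds quickly, not values from any
# released checkpoint.
config = RobertaSeriesConfig(
    vocab_size=100, hidden_size=32, intermediate_size=64, num_hidden_layers=2, num_attention_heads=2, project_dim=16
)
model = RobertaSeriesModelWithTransformation(config).eval()
out = model(input_ids=torch.tensor([[5, 6, 7]]))
print(out.projection_state.shape)  # torch.Size([1, 3, 16])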
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A_ : Any = logging.get_logger(__name__)
A_ : Optional[int] = {
'facebook/xmod-base': 'https://huggingface.co/facebook/xmod-base/resolve/main/config.json',
'facebook/xmod-large-prenorm': 'https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json',
'facebook/xmod-base-13-125k': 'https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json',
'facebook/xmod-base-30-125k': 'https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json',
'facebook/xmod-base-30-195k': 'https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json',
'facebook/xmod-base-60-125k': 'https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json',
'facebook/xmod-base-60-265k': 'https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json',
'facebook/xmod-base-75-125k': 'https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json',
'facebook/xmod-base-75-269k': 'https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json',
}
class XmodConfig(PretrainedConfig):
    """Configuration class for an X-MOD model, extending the RoBERTa layout with per-language adapter settings."""

    model_type = "xmod"

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        pre_norm=False,
        adapter_reduction_factor=2,
        adapter_layer_norm=False,
        adapter_reuse_layer_norm=True,
        ln_before_adapter=True,
        languages=("en_XX",),
        default_language=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        self.pre_norm = pre_norm
        self.adapter_reduction_factor = adapter_reduction_factor
        self.adapter_layer_norm = adapter_layer_norm
        self.adapter_reuse_layer_norm = adapter_reuse_layer_norm
        self.ln_before_adapter = ln_before_adapter
        self.languages = list(languages)
        self.default_language = default_language


class XmodOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
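
# A short sketch instantiating the config above; the overridden values are
# arbitrary and only demonstrate the X-MOD adapter-specific fields.
config = XmodConfig(adapter_reduction_factor=4, languages=("en_XX", "de_DE"), default_language="en_XX")
print(config.model_type, config.languages)  # xmod ['en_XX', 'de_DE']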
# Algorithm for the pigeonhole sorting
def pigeonhole_sort(a: list) -> None:
    """Sorts the list `a` in place using pigeonhole sort. Works on integers only."""
    min_val = min(a)  # min() finds the minimum value
    max_val = max(a)  # max() finds the maximum value

    size = max_val - min_val + 1  # size is difference of max and min values plus one

    # list of pigeonholes of size equal to the variable size
    holes = [0] * size

    # Populate the pigeonholes.
    for x in a:
        assert isinstance(x, int), "integers only please"
        holes[x - min_val] += 1

    # Putting the elements back into the array in an order.
    i = 0
    for count in range(size):
        while holes[count] > 0:
            holes[count] -= 1
            a[i] = count + min_val
            i += 1


def main() -> None:
    a = [8, 3, 2, 7, 4, 6, 8]
    pigeonhole_sort(a)
    print("Sorted order is:", " ".join(str(n) for n in a))


if __name__ == "__main__":
    main()
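
# A hand-checked trace of the demo input above, for intuition (not part of
# the original module):
#   a = [8, 3, 2, 7, 4, 6, 8] -> min_val = 2, size = 7
#   holes (counts for values 2..8) = [1, 1, 1, 0, 1, 1, 2]
#   reading the holes back out gives 2 3 4 6 7 8 8
sample = [8, 3, 2, 7, 4, 6, 8]
pigeonhole_sort(sample)
assert sample == [2, 3, 4, 6, 7, 8, 8]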
def solution(limit: int = 1_000) -> int:
    """Returns the sum of all multiples of 3 or 5 below `limit` (Project Euler problem 1)."""
    return sum(e for e in range(3, limit) if e % 3 == 0 or e % 5 == 0)


if __name__ == "__main__":
    print(f"{solution() = }")
def solution(limit: int = 1000) -> int:
    """Duplicate formulation of the Project Euler problem 1 solution above."""
    return sum(e for e in range(3, limit) if e % 3 == 0 or e % 5 == 0)


if __name__ == "__main__":
    print(f"{solution() = }")
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def convert_luke_checkpoint(checkpoint_path, metadata_path, entity_vocab_path, pytorch_dump_folder_path, model_size):
    # Load configuration defined in the metadata file
    with open(metadata_path) as metadata_file:
        metadata = json.load(metadata_file)
    config = LukeConfig(use_entity_aware_attention=True, **metadata["model_config"])

    # Load in the weights from the checkpoint_path
    state_dict = torch.load(checkpoint_path, map_location="cpu")["module"]

    # Load the entity vocab file
    entity_vocab = load_original_entity_vocab(entity_vocab_path)
    # add an entry for [MASK2]
    entity_vocab["[MASK2]"] = max(entity_vocab.values()) + 1
    config.entity_vocab_size += 1

    tokenizer = XLMRobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"])

    # Add special tokens to the token vocabulary for downstream tasks
    entity_token_1 = AddedToken("<ent>", lstrip=False, rstrip=False)
    entity_token_2 = AddedToken("<ent2>", lstrip=False, rstrip=False)
    tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_1, entity_token_2]})
    config.vocab_size += 2

    print(f"Saving tokenizer to {pytorch_dump_folder_path}")
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    with open(os.path.join(pytorch_dump_folder_path, "tokenizer_config.json"), "r") as f:
        tokenizer_config = json.load(f)
    tokenizer_config["tokenizer_class"] = "MLukeTokenizer"
    with open(os.path.join(pytorch_dump_folder_path, "tokenizer_config.json"), "w") as f:
        json.dump(tokenizer_config, f)

    with open(os.path.join(pytorch_dump_folder_path, MLukeTokenizer.vocab_files_names["entity_vocab_file"]), "w") as f:
        json.dump(entity_vocab, f)

    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path)

    # Initialize the embeddings of the special tokens
    ent_init_index = tokenizer.convert_tokens_to_ids(["@"])[0]
    ent2_init_index = tokenizer.convert_tokens_to_ids(["#"])[0]

    word_emb = state_dict["embeddings.word_embeddings.weight"]
    ent_emb = word_emb[ent_init_index].unsqueeze(0)
    ent2_emb = word_emb[ent2_init_index].unsqueeze(0)
    state_dict["embeddings.word_embeddings.weight"] = torch.cat([word_emb, ent_emb, ent2_emb])
    # add special tokens for 'entity_predictions.bias'
    for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
        decoder_bias = state_dict[bias_name]
        ent_decoder_bias = decoder_bias[ent_init_index].unsqueeze(0)
        ent2_decoder_bias = decoder_bias[ent2_init_index].unsqueeze(0)
        state_dict[bias_name] = torch.cat([decoder_bias, ent_decoder_bias, ent2_decoder_bias])

    # Initialize the query layers of the entity-aware self-attention mechanism
    for layer_index in range(config.num_hidden_layers):
        for matrix_name in ["query.weight", "query.bias"]:
            prefix = f"encoder.layer.{layer_index}.attention.self."
            state_dict[prefix + "w2e_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2w_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2e_" + matrix_name] = state_dict[prefix + matrix_name]

    # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
    entity_emb = state_dict["entity_embeddings.entity_embeddings.weight"]
    entity_mask_emb = entity_emb[entity_vocab["[MASK]"]].unsqueeze(0)
    state_dict["entity_embeddings.entity_embeddings.weight"] = torch.cat([entity_emb, entity_mask_emb])
    # add [MASK2] for 'entity_predictions.bias'
    entity_prediction_bias = state_dict["entity_predictions.bias"]
    entity_mask_bias = entity_prediction_bias[entity_vocab["[MASK]"]].unsqueeze(0)
    state_dict["entity_predictions.bias"] = torch.cat([entity_prediction_bias, entity_mask_bias])

    model = LukeForMaskedLM(config=config).eval()

    state_dict.pop("entity_predictions.decoder.weight")
    state_dict.pop("lm_head.decoder.weight")
    state_dict.pop("lm_head.decoder.bias")
    state_dict_for_hugging_face = OrderedDict()
    for key, value in state_dict.items():
        if not (key.startswith("lm_head") or key.startswith("entity_predictions")):
            state_dict_for_hugging_face[f"luke.{key}"] = state_dict[key]
        else:
            state_dict_for_hugging_face[key] = state_dict[key]

    missing_keys, unexpected_keys = model.load_state_dict(state_dict_for_hugging_face, strict=False)

    if set(unexpected_keys) != {"luke.embeddings.position_ids"}:
        raise ValueError(f"Unexpected unexpected_keys: {unexpected_keys}")
    if set(missing_keys) != {
        "lm_head.decoder.weight",
        "lm_head.decoder.bias",
        "entity_predictions.decoder.weight",
    }:
        raise ValueError(f"Unexpected missing_keys: {missing_keys}")

    model.tie_weights()
    assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
    assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()

    # Check outputs
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path, task="entity_classification")

    text = "ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan)."
    span = (0, 9)
    encoding = tokenizer(text, entity_spans=[span], return_tensors="pt")

    outputs = model(**encoding)

    # Verify word hidden states
    if model_size == "large":
        raise NotImplementedError
    else:  # base
        expected_shape = torch.Size((1, 33, 768))
        expected_slice = torch.tensor([[0.0892, 0.0596, -0.2819], [0.0134, 0.1199, 0.0573], [-0.0169, 0.0927, 0.0644]])

    if not (outputs.last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}"
        )
    if not torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Verify entity hidden states
    if model_size == "large":
        raise NotImplementedError
    else:  # base
        expected_shape = torch.Size((1, 1, 768))
        expected_slice = torch.tensor([[-0.1482, 0.0609, 0.0322]])

    if not (outputs.entity_last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"
            f" {expected_shape}"
        )
    if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Verify masked word/entity prediction
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path)
    text = "Tokyo is the capital of <mask>."
    span = (24, 30)
    encoding = tokenizer(text, entity_spans=[span], return_tensors="pt")

    outputs = model(**encoding)

    input_ids = encoding["input_ids"][0].tolist()
    mask_position_id = input_ids.index(tokenizer.convert_tokens_to_ids("<mask>"))
    predicted_id = outputs.logits[0][mask_position_id].argmax(dim=-1)
    assert "Japan" == tokenizer.decode(predicted_id)

    predicted_entity_id = outputs.entity_logits[0][0].argmax().item()
    multilingual_predicted_entities = [
        entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
    ]
    assert [e for e in multilingual_predicted_entities if e.startswith("en:")][0] == "en:Japan"

    # Finally, save our PyTorch model and tokenizer
    print("Saving PyTorch model to {}".format(pytorch_dump_folder_path))
    model.save_pretrained(pytorch_dump_folder_path)


def load_original_entity_vocab(entity_vocab_path):
    SPECIAL_TOKENS = ["[MASK]", "[PAD]", "[UNK]"]

    data = [json.loads(line) for line in open(entity_vocab_path)]

    new_mapping = {}
    for entry in data:
        entity_id = entry["id"]
        for entity_name, language in entry["entities"]:
            if entity_name in SPECIAL_TOKENS:
                new_mapping[entity_name] = entity_id
                break
            new_mapping[f"{language}:{entity_name}"] = entity_id
    return new_mapping


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--checkpoint_path", type=str, help="Path to a pytorch_model.bin file.")
    parser.add_argument(
        "--metadata_path", default=None, type=str, help="Path to a metadata.json file, defining the configuration."
    )
    parser.add_argument(
        "--entity_vocab_path",
        default=None,
        type=str,
        help="Path to an entity_vocab.tsv file, containing the entity vocabulary.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to where to dump the output PyTorch model."
    )
    parser.add_argument(
        "--model_size", default="base", type=str, choices=["base", "large"], help="Size of the model to be converted."
    )
    args = parser.parse_args()
    convert_luke_checkpoint(
        args.checkpoint_path,
        args.metadata_path,
        args.entity_vocab_path,
        args.pytorch_dump_folder_path,
        args.model_size,
    )
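
# Hypothetical invocation of the conversion script above; every path below is
# a placeholder for real mLUKE checkpoint artifacts:
#
#   python convert_mluke_original_pytorch_checkpoint_to_pytorch.py \
#       --checkpoint_path ./luke.bin \
#       --metadata_path ./metadata.json \
#       --entity_vocab_path ./entity_vocab.jsonl \
#       --pytorch_dump_folder_path ./mluke-base \
#       --model_size base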
"""simple docstring"""
from __future__ import annotations
class Matrix:
    """A matrix of ints/floats with determinant, inverse and basic arithmetic operations."""

    def __init__(self, rows: list[list[int]]):
        error = TypeError(
            "Matrices must be formed from a list of zero or more lists containing at "
            "least one and the same number of values, each of which must be of type "
            "int or float."
        )
        if len(rows) != 0:
            cols = len(rows[0])
            if cols == 0:
                raise error
            for row in rows:
                if len(row) != cols:
                    raise error
                for value in row:
                    if not isinstance(value, (int, float)):
                        raise error
            self.rows = rows
        else:
            self.rows = []

    # MATRIX INFORMATION
    def columns(self) -> list[list[int]]:
        return [[row[i] for row in self.rows] for i in range(len(self.rows[0]))]

    @property
    def num_rows(self) -> int:
        return len(self.rows)

    @property
    def num_columns(self) -> int:
        return len(self.rows[0])

    @property
    def order(self) -> tuple[int, int]:
        return (self.num_rows, self.num_columns)

    @property
    def is_square(self) -> bool:
        return self.order[0] == self.order[1]

    def identity(self) -> Matrix:
        values = [
            [0 if column_num != row_num else 1 for column_num in range(self.num_rows)]
            for row_num in range(self.num_rows)
        ]
        return Matrix(values)

    def determinant(self) -> int:
        if not self.is_square:
            return 0
        if self.order == (0, 0):
            return 1
        if self.order == (1, 1):
            return int(self.rows[0][0])
        if self.order == (2, 2):
            return int(
                (self.rows[0][0] * self.rows[1][1])
                - (self.rows[0][1] * self.rows[1][0])
            )
        else:
            return sum(
                self.rows[0][column] * self.cofactors().rows[0][column]
                for column in range(self.num_columns)
            )

    def is_invertable(self) -> bool:
        return bool(self.determinant())

    def get_minor(self, row: int, column: int) -> int:
        values = [
            [
                self.rows[other_row][other_column]
                for other_column in range(self.num_columns)
                if other_column != column
            ]
            for other_row in range(self.num_rows)
            if other_row != row
        ]
        return Matrix(values).determinant()

    def get_cofactor(self, row: int, column: int) -> int:
        if (row + column) % 2 == 0:
            return self.get_minor(row, column)
        return -1 * self.get_minor(row, column)

    def minors(self) -> Matrix:
        return Matrix(
            [
                [self.get_minor(row, column) for column in range(self.num_columns)]
                for row in range(self.num_rows)
            ]
        )

    def cofactors(self) -> Matrix:
        return Matrix(
            [
                [
                    self.minors().rows[row][column]
                    if (row + column) % 2 == 0
                    else self.minors().rows[row][column] * -1
                    for column in range(self.minors().num_columns)
                ]
                for row in range(self.minors().num_rows)
            ]
        )

    def adjugate(self) -> Matrix:
        values = [
            [self.cofactors().rows[column][row] for column in range(self.num_columns)]
            for row in range(self.num_rows)
        ]
        return Matrix(values)

    def inverse(self) -> Matrix:
        determinant = self.determinant()
        if not determinant:
            raise TypeError("Only matrices with a non-zero determinant have an inverse")
        return self.adjugate() * (1 / determinant)

    def __repr__(self) -> str:
        return str(self.rows)

    def __str__(self) -> str:
        if self.num_rows == 0:
            return "[]"
        if self.num_rows == 1:
            return "[[" + ". ".join(str(self.rows[0])) + "]]"
        return (
            "["
            + "\n ".join(
                [
                    "[" + ". ".join([str(value) for value in row]) + ".]"
                    for row in self.rows
                ]
            )
            + "]"
        )

    # MATRIX MANIPULATION
    def add_row(self, row: list[int], position: int | None = None) -> None:
        type_error = TypeError("Row must be a list containing all ints and/or floats")
        if not isinstance(row, list):
            raise type_error
        for value in row:
            if not isinstance(value, (int, float)):
                raise type_error
        if len(row) != self.num_columns:
            raise ValueError(
                "Row must be equal in length to the other rows in the matrix"
            )
        if position is None:
            self.rows.append(row)
        else:
            self.rows = self.rows[0:position] + [row] + self.rows[position:]

    def add_column(self, column: list[int], position: int | None = None) -> None:
        type_error = TypeError(
            "Column must be a list containing all ints and/or floats"
        )
        if not isinstance(column, list):
            raise type_error
        for value in column:
            if not isinstance(value, (int, float)):
                raise type_error
        if len(column) != self.num_rows:
            raise ValueError(
                "Column must be equal in length to the other columns in the matrix"
            )
        if position is None:
            self.rows = [self.rows[i] + [column[i]] for i in range(self.num_rows)]
        else:
            self.rows = [
                self.rows[i][0:position] + [column[i]] + self.rows[i][position:]
                for i in range(self.num_rows)
            ]

    # MATRIX OPERATIONS
    def __eq__(self, other: object) -> bool:
        if not isinstance(other, Matrix):
            return NotImplemented
        return self.rows == other.rows

    def __ne__(self, other: object) -> bool:
        return not self == other

    def __neg__(self) -> Matrix:
        return self * -1

    def __add__(self, other: Matrix) -> Matrix:
        if self.order != other.order:
            raise ValueError("Addition requires matrices of the same order")
        return Matrix(
            [
                [self.rows[i][j] + other.rows[i][j] for j in range(self.num_columns)]
                for i in range(self.num_rows)
            ]
        )

    def __sub__(self, other: Matrix) -> Matrix:
        if self.order != other.order:
            raise ValueError("Subtraction requires matrices of the same order")
        return Matrix(
            [
                [self.rows[i][j] - other.rows[i][j] for j in range(self.num_columns)]
                for i in range(self.num_rows)
            ]
        )

    def __mul__(self, other: Matrix | int | float) -> Matrix:
        if isinstance(other, (int, float)):
            return Matrix(
                [[int(element * other) for element in row] for row in self.rows]
            )
        elif isinstance(other, Matrix):
            if self.num_columns != other.num_rows:
                raise ValueError(
                    "The number of columns in the first matrix must "
                    "be equal to the number of rows in the second"
                )
            return Matrix(
                [
                    [Matrix.dot_product(row, column) for column in other.columns()]
                    for row in self.rows
                ]
            )
        else:
            raise TypeError(
                "A Matrix can only be multiplied by an int, float, or another matrix"
            )

    def __pow__(self, other: int) -> Matrix:
        if not isinstance(other, int):
            raise TypeError("A Matrix can only be raised to the power of an int")
        if not self.is_square:
            raise ValueError("Only square matrices can be raised to a power")
        if other == 0:
            return self.identity()
        if other < 0:
            if self.is_invertable():
                return self.inverse() ** (-other)
            raise ValueError(
                "Only invertable matrices can be raised to a negative power"
            )
        result = self
        for _ in range(other - 1):
            result *= self
        return result

    @classmethod
    def dot_product(cls, row: list[int], column: list[int]) -> int:
        return sum(row[i] * column[i] for i in range(len(row)))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
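
# A short, hand-computed sanity check for the Matrix class above:
m = Matrix([[1, 2], [3, 4]])
assert m.determinant() == -2
assert (m * m).rows == [[7, 10], [15, 22]]
assert m ** 2 == m * m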
"""simple docstring"""
from datetime import datetime as dt
import os
from github import Github
LABELS_TO_EXEMPT = [
'good first issue',
'good second issue',
'good difficult issue',
'feature request',
'new model',
'wip',
]
def main():
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/transformers")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        comments = sorted([comment for comment in issue.get_comments()], key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
issue.edit(state="""closed""" )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would add stale comment to {issue.number}")
issue.create_comment(
"""This issue has been automatically marked as stale because it has not had """
"""recent activity. If you think this still needs to be addressed """
"""please comment on this thread.\n\nPlease note that issues that do not follow the """
"""[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) """
"""are likely to be ignored.""" )
if __name__ == "__main__":
main()
"""simple docstring"""
import importlib
import torch
import yaml
from omegaconf import OmegaConf
from taming.models.vqgan import VQModel
def load_config(config_path, display=False):
    config = OmegaConf.load(config_path)
    if display:
        print(yaml.dump(OmegaConf.to_container(config)))
    return config


def load_vqgan(device, conf_path=None, ckpt_path=None):
    if conf_path is None:
        conf_path = "./model_checkpoints/vqgan_only.yaml"
    config = load_config(conf_path, display=False)
    model = VQModel(**config.model.params)
    if ckpt_path is None:
        ckpt_path = "./model_checkpoints/vqgan_only.pt"
    sd = torch.load(ckpt_path, map_location=device)
    if ".ckpt" in ckpt_path:
        sd = sd["state_dict"]
    model.load_state_dict(sd, strict=True)
    model.to(device)
    del sd
    return model


def reconstruct_with_vqgan(x, model):
    z, _, [_, _, indices] = model.encode(x)
    print(f"VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}")
    xrec = model.decode(z)
    return xrec


def get_obj_from_str(string, reload=False):
    module, cls = string.rsplit(".", 1)
    if reload:
        module_imp = importlib.import_module(module)
        importlib.reload(module_imp)
    return getattr(importlib.import_module(module, package=None), cls)


def instantiate_from_config(config):
    if "target" not in config:
        raise KeyError("Expected key `target` to instantiate.")
    return get_obj_from_str(config["target"])(**config.get("params", {}))


def load_model_from_config(config, sd, gpu=True, eval_mode=True):
    model = instantiate_from_config(config)
    if sd is not None:
        model.load_state_dict(sd)
    if gpu:
        model.cuda()
    if eval_mode:
        model.eval()
    return {"model": model}


def load_model(config, ckpt, gpu, eval_mode):
    # load the checkpoint (if any) before instantiating the model from its config
    if ckpt:
        pl_sd = torch.load(ckpt, map_location="cpu")
        global_step = pl_sd["global_step"]
        print(f"loaded model from global step {global_step}.")
    else:
        pl_sd = {"state_dict": None}
        global_step = None
    model = load_model_from_config(config.model, pl_sd["state_dict"], gpu=gpu, eval_mode=eval_mode)["model"]
    return model, global_step
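
# A hypothetical end-to-end sketch of the helpers above; the checkpoint and
# config paths are placeholders that must point at real taming-transformers
# files, and the random tensor stands in for a normalized image batch.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
vqgan = load_vqgan(device, conf_path="./model_checkpoints/vqgan_only.yaml", ckpt_path="./model_checkpoints/vqgan_only.pt")
x = torch.randn(1, 3, 256, 256, device=device)
xrec = reconstruct_with_vqgan(x, vqgan)
print(xrec.shape)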
"""simple docstring"""
import argparse
import fairseq
import torch
from torch import nn
from transformers import (
    MBart50Tokenizer,
    MBartConfig,
    MBartForCausalLM,
    SpeechEncoderDecoderConfig,
    SpeechEncoderDecoderModel,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Model,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
TOP_LEVEL_KEYS = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights_wav2vec2(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.feature_extractor
    adapter = hf_model.adapter

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        elif any(x in name for x in ["adaptor", "w2v_encoder.proj.", "w2v_proj_ln."]):
            load_adapter(name, value, adapter, unused_weights)
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                    continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
def load_adapter(full_name, value, adapter, unused_weights):
    name = full_name.split("adaptor.")[-1]
    items = name.split(".")

    # adapter conv layers are numbered; the projection and its layer norm are not
    if items[1].isdigit():
        layer_id = int(items[1])
    else:
        layer_id = None

    if "adaptor" not in full_name:
        if "proj_ln" in full_name:
            # has to be layer norm
            if "bias" in name:
                assert (
                    value.shape == adapter.proj_layer_norm.bias.data.shape
                ), f"{full_name} has size {value.shape}, but {adapter.proj_layer_norm.bias.data.shape} was found."
                adapter.proj_layer_norm.bias.data = value
                logger.info(f"Adapter proj layer norm bias was initialized from {full_name}.")
            if "weight" in name:
                assert (
                    value.shape == adapter.proj_layer_norm.weight.data.shape
                ), f"{full_name} has size {value.shape}, but {adapter.proj_layer_norm.weight.data.shape} was found."
                adapter.proj_layer_norm.weight.data = value
        else:
            # has to be projection layer
            if "bias" in name:
                assert (
                    value.shape == adapter.proj.bias.data.shape
                ), f"{full_name} has size {value.shape}, but {adapter.proj.bias.data.shape} was found."
                adapter.proj.bias.data = value
                logger.info(f"Adapter proj layer bias was initialized from {full_name}.")
            if "weight" in name:
                assert (
                    value.shape == adapter.proj.weight.data.shape
                ), f"{full_name} has size {value.shape}, but {adapter.proj.weight.data.shape} was found."
                adapter.proj.weight.data = value
                logger.info(f"Adapter proj layer weight was initialized from {full_name}.")
    elif isinstance(layer_id, int):
        if "bias" in name:
            assert (
                value.shape == adapter.layers[layer_id].conv.bias.data.shape
            ), f"{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.bias.data.shape} was found."
            adapter.layers[layer_id].conv.bias.data = value
            logger.info(f"Adapter layer {layer_id} bias was initialized from {full_name}.")
        elif "weight" in name:
            assert (
                value.shape == adapter.layers[layer_id].conv.weight.data.shape
            ), f"{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.weight.data.shape} was found."
            adapter.layers[layer_id].conv.weight.data = value
            logger.info(f"Adapter layer {layer_id} weight was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
@torch.no_grad()
def convert_wav2vec2_checkpoint(
    checkpoint_path,
    pytorch_dump_folder_path,
    dict_path,
    config_yaml_path,
    encoder_config_path,
    decoder_config_path,
    add_adapter,
    adapter_kernel_size,
    adapter_stride,
    decoder_start_token_id,
    encoder_output_dim,
):
    """Copies/pastes/tweaks a fairseq wav2vec2 + mBART checkpoint into the transformers design."""
    # load configs
    encoder_config = Wav2Vec2Config.from_pretrained(
        encoder_config_path,
        add_adapter=add_adapter,
        adapter_stride=adapter_stride,
        adapter_kernel_size=adapter_kernel_size,
        use_auth_token=True,
        output_hidden_size=encoder_output_dim,
    )
    decoder_config = MBartConfig.from_pretrained(decoder_config_path)

    # load model
    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path],
        arg_overrides={
            "config_yaml": config_yaml_path,
            "data": "/".join(dict_path.split("/")[:-1]),
            "w2v_path": checkpoint_path,
            "load_pretrained_decoder_from": None,
        },
    )
    model = model[0].eval()

    # load feature extractor
    feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(encoder_config_path, use_auth_token=True)

    # set weights for wav2vec2 encoder
    hf_encoder = Wav2Vec2Model(encoder_config)
    recursively_load_weights_wav2vec2(model.encoder, hf_encoder)

    # load decoder weights
    hf_decoder = MBartForCausalLM(decoder_config)
    missing_keys, unexpected_keys = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict(), strict=False)
    logger.warning(f"The following keys are missing when loading the decoder weights: {missing_keys}")
    logger.warning(f"The following keys are unexpected when loading the decoder weights: {unexpected_keys}")

    hf_wav2vec = SpeechEncoderDecoderModel(encoder=hf_encoder, decoder=hf_decoder)
    hf_wav2vec.config.tie_word_embeddings = False

    tokenizer = MBart50Tokenizer(dict_path)
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    config = hf_wav2vec.config.to_dict()
    config["pad_token_id"] = tokenizer.pad_token_id
    config["bos_token_id"] = tokenizer.bos_token_id
    config["eos_token_id"] = tokenizer.eos_token_id
    config["tokenizer_class"] = "mbart50"
    config["feature_extractor_type"] = "wav2vec2"

    config["decoder_start_token_id"] = tokenizer.eos_token_id
    config["forced_bos_token_id"] = 250004
    config["forced_eos_token_id"] = tokenizer.eos_token_id

    hf_wav2vec.config = SpeechEncoderDecoderConfig.from_dict(config)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
    feature_extractor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_yaml_path", default=None, type=str, help="Path to yaml file of fine-tuned model")
parser.add_argument(
"--encoder_config_path",
default="facebook/wav2vec2-xls-r-1b",
type=str,
help="Path to hf encoder wav2vec2 checkpoint config",
)
parser.add_argument(
"--decoder_config_path",
default="facebook/mbart-large-50-one-to-many-mmt",
type=str,
help="Path to hf decoder checkpoint config",
)
parser.add_argument("--add_adapter", default=True, type=bool, help="whethere to add model adapter layers")
parser.add_argument("--adapter_stride", default=2, type=int, help="stride of adapter layers")
parser.add_argument("--adapter_kernel_size", default=3, type=int, help="kernel size of adapter layers")
parser.add_argument("--encoder_output_dim", default=1024, type=int, help="encoder output dim")
parser.add_argument("--start_token_id", default=250004, type=int, help="`decoder_start_token_id` of model config")
    args = parser.parse_args()
    convert_wav2vec2_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
args.config_yaml_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
add_adapter=args.add_adapter,
adapter_kernel_size=args.adapter_kernel_size,
adapter_stride=args.adapter_stride,
decoder_start_token_id=args.start_token_id,
encoder_output_dim=args.encoder_output_dim,
)
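
# Hypothetical invocation of the conversion script above; all paths are
# placeholders for real fairseq checkpoint artifacts:
#
#   python convert_mbart_wav2vec2_checkpoint.py \
#       --checkpoint_path ./checkpoint_best.pt \
#       --dict_path ./dict.mbart50.txt \
#       --config_yaml_path ./config.yaml \
#       --pytorch_dump_folder_path ./wav2vec2-mbart50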
from collections import namedtuple
from_to = namedtuple("from_to", "from_ to")

METRIC_CONVERSION = {
    "cubicmeter": from_to(1, 1),
    "litre": from_to(0.001, 1000),
    "kilolitre": from_to(1, 1),
    "gallon": from_to(0.00_454, 264.172),
    "cubicyard": from_to(0.76_455, 1.30_795),
    "cubicfoot": from_to(0.028, 35.3_147),
    "cup": from_to(0.000_236_588, 4_226.75),
}


def volume_conversion(value: float, from_type: str, to_type: str) -> float:
    """Converts `value` between the volume units listed in METRIC_CONVERSION."""
    if from_type not in METRIC_CONVERSION:
        raise ValueError(
            f"Invalid 'from_type' value: {from_type!r} Supported values are:\n"
            + ", ".join(METRIC_CONVERSION)
        )
    if to_type not in METRIC_CONVERSION:
        raise ValueError(
            f"Invalid 'to_type' value: {to_type!r}. Supported values are:\n"
            + ", ".join(METRIC_CONVERSION)
        )
    return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to
if __name__ == "__main__":
import doctest
doctest.testmod()
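
# Two quick consistency checks against the conversion table above:
assert volume_conversion(1, "cubicmeter", "litre") == 1000
assert round(volume_conversion(1, "litre", "gallon"), 6) == 0.264172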
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class CLIPSegProcessor(ProcessorMixin):
    """Wraps a ViT image processor and a CLIP tokenizer into a single CLIPSeg processor."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, visual_prompt=None, return_tensors=None, **kwargs):
        if text is None and visual_prompt is None and images is None:
            raise ValueError("You have to specify either text, visual prompt or images.")
        if text is not None and visual_prompt is not None:
            raise ValueError("You have to specify exactly one type of prompt. Either text or visual prompt.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)
        if visual_prompt is not None:
            prompt_features = self.image_processor(visual_prompt, return_tensors=return_tensors, **kwargs)
        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if visual_prompt is not None and images is not None:
            encoding = {
                "pixel_values": image_features.pixel_values,
                "conditional_pixel_values": prompt_features.pixel_values,
            }
            return encoding
        elif text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        elif visual_prompt is not None:
            encoding = {
                "conditional_pixel_values": prompt_features.pixel_values,
            }
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's `decode`."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
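
# A hypothetical usage sketch for the processor above; it assumes network
# access to download the public CLIPSeg checkpoint and uses a blank image.
import numpy as np
from PIL import Image

processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
image = Image.fromarray(np.zeros((224, 224, 3), dtype=np.uint8))
inputs = processor(text=["a cat"], images=[image], return_tensors="pt")
print(sorted(inputs.keys()))  # ['attention_mask', 'input_ids', 'pixel_values']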
'''simple docstring'''
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_mctct": ["MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MCTCTConfig"],
    "feature_extraction_mctct": ["MCTCTFeatureExtractor"],
    "processing_mctct": ["MCTCTProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mctct"] = [
        "MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MCTCTForCTC",
        "MCTCTModel",
        "MCTCTPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
from .feature_extraction_mctct import MCTCTFeatureExtractor
from .processing_mctct import MCTCTProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
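
# With the lazy structure above, submodules are only imported on first
# attribute access, e.g. (module path inferred from the relative imports
# above, so treat it as an assumption):
#
#   from transformers.models.deprecated.mctct import MCTCTConfig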
'''simple docstring'''
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
FPaRecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
    is_4bit_bnb_available,
    is_8bit_bnb_available,
is_aim_available,
is_bfaa_available,
is_bnb_available,
is_botoa_available,
is_ccl_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_fpa_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
convert_outputs_to_fpaa,
convert_to_fpaa,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
from .bnb import has_abit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
prepare_sagemager_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
TaTrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers
"""simple docstring"""
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class XLMModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_lengths=True,
        use_token_type_ids=True,
        use_labels=True,
        gelu_activation=True,
        sinusoidal_embeddings=False,
        causal=False,
        asm=False,
        n_langs=2,
        vocab_size=99,
        n_special=0,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=2,
        num_choices=4,
        summary_type="last",
        use_proj=True,
        scope=None,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_lengths = use_input_lengths
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.vocab_size = vocab_size
        self.n_special = n_special
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.summary_type = summary_type
        self.use_proj = use_proj
        self.scope = scope
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = random_attention_mask([self.batch_size, self.seq_length])

        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)

        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            is_impossible_labels = ids_tensor([self.batch_size], 2).float()
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )
    def get_config(self):
'''simple docstring'''
return XLMConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , )
    def create_and_check_xlm_model(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        model = XLMModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, lengths=input_lengths, langs=token_type_ids)
        result = model(input_ids, langs=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_xlm_lm_head(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        model = XLMWithLMHeadModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_xlm_simple_qa(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        model = XLMForQuestionAnsweringSimple(config)
        model.to(torch_device)
        model.eval()
        outputs = model(input_ids)
        outputs = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        result = outputs
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def create_and_check_xlm_qa(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        model = XLMForQuestionAnswering(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
            p_mask=input_mask,
        )
        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
        )
        (total_loss,) = result_with_labels.to_tuple()
        result_with_labels = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        (total_loss,) = result_with_labels.to_tuple()
        self.parent.assertEqual(result_with_labels.loss.shape, ())
        self.parent.assertEqual(result.start_top_log_probs.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(result.start_top_index.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(
            result.end_top_log_probs.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(
            result.end_top_index.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(result.cls_logits.shape, (self.batch_size,))
def A ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = XLMForSequenceClassification(a_ )
model.to(a_ )
model.eval()
_SCREAMING_SNAKE_CASE = model(a_ )
_SCREAMING_SNAKE_CASE = model(a_ , labels=a_ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def A ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.num_labels
_SCREAMING_SNAKE_CASE = XLMForTokenClassification(a_ )
model.to(a_ )
model.eval()
_SCREAMING_SNAKE_CASE = model(a_ , attention_mask=a_ , labels=a_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def A ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.num_choices
_SCREAMING_SNAKE_CASE = XLMForMultipleChoice(config=a_ )
model.to(a_ )
model.eval()
_SCREAMING_SNAKE_CASE = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_SCREAMING_SNAKE_CASE = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_SCREAMING_SNAKE_CASE = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_SCREAMING_SNAKE_CASE = model(
a_ , attention_mask=a_ , token_type_ids=a_ , labels=a_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def A ( self ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs()
(
_SCREAMING_SNAKE_CASE
) = config_and_inputs
_SCREAMING_SNAKE_CASE = {"input_ids": input_ids, "token_type_ids": token_type_ids, "lengths": input_lengths}
return config, inputs_dict
@require_torch
class XLMModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            XLMModel,
            XLMWithLMHeadModel,
            XLMForQuestionAnswering,
            XLMForSequenceClassification,
            XLMForQuestionAnsweringSimple,
            XLMForTokenClassification,
            XLMForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (
        (XLMWithLMHeadModel,) if is_torch_available() else ()
    )  # TODO (PVP): Check other models whether language generation is also applicable
    pipeline_model_mapping = (
        {
            "feature-extraction": XLMModel,
            "fill-mask": XLMWithLMHeadModel,
            "question-answering": XLMForQuestionAnsweringSimple,
            "text-classification": XLMForSequenceClassification,
            "text-generation": XLMWithLMHeadModel,
            "token-classification": XLMForTokenClassification,
            "zero-shot": XLMForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith("Fast")
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizer are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True

        return False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "XLMForQuestionAnswering":
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )

        return inputs_dict
    def setUp(self):
        self.model_tester = XLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=XLMConfig, emb_dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_xlm_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_model(*config_and_inputs)

    def test_xlm_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_lm_head(*config_and_inputs)

    def test_xlm_simple_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_simple_qa(*config_and_inputs)

    def test_xlm_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_qa(*config_and_inputs)

    def test_xlm_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_sequence_classif(*config_and_inputs)

    def test_xlm_token_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_token_classif(*config_and_inputs)

    def test_xlm_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_for_multiple_choice(*config_and_inputs)
    def _check_attentions_for_generate(
        self, batch_size, attentions, min_length, max_length, config, use_cache=False, num_beam_groups=1
    ):
        self.assertIsInstance(attentions, tuple)
        self.assertListEqual(
            [isinstance(iter_attentions, tuple) for iter_attentions in attentions], [True] * len(attentions)
        )
        self.assertEqual(len(attentions), (max_length - min_length) * num_beam_groups)

        for idx, iter_attentions in enumerate(attentions):
            # adds PAD dummy token
            tgt_len = min_length + idx + 1
            src_len = min_length + idx + 1

            expected_shape = (
                batch_size * num_beam_groups,
                config.num_attention_heads,
                tgt_len,
                src_len,
            )
            # check attn size
            self.assertListEqual(
                [layer_attention.shape for layer_attention in iter_attentions], [expected_shape] * len(iter_attentions)
            )

    def _check_hidden_states_for_generate(
        self, batch_size, hidden_states, min_length, max_length, config, use_cache=False, num_beam_groups=1
    ):
        self.assertIsInstance(hidden_states, tuple)
        self.assertListEqual(
            [isinstance(iter_hidden_states, tuple) for iter_hidden_states in hidden_states],
            [True] * len(hidden_states),
        )
        self.assertEqual(len(hidden_states), (max_length - min_length) * num_beam_groups)

        for idx, iter_hidden_states in enumerate(hidden_states):
            # adds PAD dummy token
            seq_len = min_length + idx + 1
            expected_shape = (batch_size * num_beam_groups, seq_len, config.hidden_size)
            # check hidden size
            self.assertListEqual(
                [layer_hidden_states.shape for layer_hidden_states in iter_hidden_states],
                [expected_shape] * len(iter_hidden_states),
            )
    @slow
    def test_model_from_pretrained(self):
        for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = XLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class XLMModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_xlm_mlm_en_2048(self):
        model = XLMWithLMHeadModel.from_pretrained("xlm-mlm-en-2048")
        model.to(torch_device)
        input_ids = torch.tensor([[14, 447]], dtype=torch.long, device=torch_device)  # the president
        expected_output_ids = [14, 447] * 10  # "the president" repeated ten times
        # TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might
        # just not be made for auto-regressive inference
        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].cpu().numpy().tolist(), expected_output_ids)
"""simple docstring"""
import unittest
from transformers.testing_utils import CaptureStdout
from transformers.tools.python_interpreter import evaluate
# Fake function we will use as tool
def add_two(x):
    return x + 2


class PythonInterpreterTester(unittest.TestCase):
    def test_evaluate_assign(self):
        code = "x = 3"
        state = {}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3})

        code = "x = y"
        state = {"y": 5}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 5, "y": 5})

    def test_evaluate_call(self):
        code = "y = add_two(x)"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "y": 5})

        # Won't work without the tool
        with CaptureStdout() as out:
            result = evaluate(code, {}, state=state)
        assert result is None
        assert "tried to execute add_two" in out.out

    def test_evaluate_constant(self):
        code = "x = 3"
        state = {}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3})

    def test_evaluate_dict(self):
        code = "test_dict = {'x': x, 'y': add_two(x)}"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        self.assertDictEqual(result, {"x": 3, "y": 5})
        self.assertDictEqual(state, {"x": 3, "test_dict": {"x": 3, "y": 5}})

    def test_evaluate_expression(self):
        code = "x = 3\ny = 5"
        state = {}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "y": 5})

    def test_evaluate_f_string(self):
        code = "text = f'This is x: {x}.'"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == "This is x: 3."
        self.assertDictEqual(state, {"x": 3, "text": "This is x: 3."})

    def test_evaluate_if(self):
        code = "if x <= 3:\n    y = 2\nelse:\n    y = 5"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 2
        self.assertDictEqual(state, {"x": 3, "y": 2})

        state = {"x": 8}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 8, "y": 5})

    def test_evaluate_list(self):
        code = "test_list = [x, add_two(x)]"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        self.assertListEqual(result, [3, 5])
        self.assertDictEqual(state, {"x": 3, "test_list": [3, 5]})

    def test_evaluate_name(self):
        code = "y = x"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3, "y": 3})

    def test_evaluate_subscript(self):
        code = "test_list = [x, add_two(x)]\ntest_list[1]"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "test_list": [3, 5]})

        code = "test_dict = {'x': x, 'y': add_two(x)}\ntest_dict['y']"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "test_dict": {"x": 3, "y": 5}})

    def test_evaluate_for(self):
        code = "x = 0\nfor i in range(3):\n    x = i"
        state = {}
        result = evaluate(code, {"range": range}, state=state)
        assert result == 2
        self.assertDictEqual(state, {"x": 2, "i": 2})
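
# A quick standalone sketch of the same API outside of unittest (illustrative
# addition, not part of the original test file): `evaluate` interprets the code
# with the given tools, mutates `state` in place, and returns the value of the
# last expression or assignment.
if __name__ == "__main__":
    state = {"x": 3}
    print(evaluate("y = add_two(x)\ny", {"add_two": add_two}, state=state))  # 5
    print(state)  # {'x': 3, 'y': 5}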
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_time_series_transformer": [
        "TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "TimeSeriesTransformerConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_time_series_transformer"] = [
        "TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TimeSeriesTransformerForPrediction",
        "TimeSeriesTransformerModel",
        "TimeSeriesTransformerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_time_series_transformer import (
        TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        TimeSeriesTransformerConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_time_series_transformer import (
            TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TimeSeriesTransformerForPrediction,
            TimeSeriesTransformerModel,
            TimeSeriesTransformerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
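
# Note (illustrative addition, not part of the original file): because the
# package module is swapped for a `_LazyModule`, importing it stays cheap; the
# torch-backed classes listed above are only materialized on first attribute
# access, e.g.:
#
#   from transformers.models.time_series_transformer import TimeSeriesTransformerConfig
#   config = TimeSeriesTransformerConfig(prediction_length=24)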
import os
def solution(filename: str = "matrix.txt") -> int:
    """
    Returns the minimal path sum through the matrix in `filename`, from the top
    left to the bottom right corner, moving only right and down.
    """
    with open(os.path.join(os.path.dirname(__file__), filename)) as in_file:
        data = in_file.read()

    grid = [[int(cell) for cell in row.split(",")] for row in data.strip().splitlines()]
    n = len(grid[0])
    dp = [[0 for _ in range(n)] for _ in range(n)]
    dp[0][0] = grid[0][0]

    for i in range(1, n):
        dp[0][i] = grid[0][i] + dp[0][i - 1]

    for i in range(1, n):
        dp[i][0] = grid[i][0] + dp[i - 1][0]

    for i in range(1, n):
        for j in range(1, n):
            dp[i][j] = grid[i][j] + min(dp[i - 1][j], dp[i][j - 1])

    return dp[-1][-1]


if __name__ == "__main__":
    print(f"{solution() = }")
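
# A small sanity check of the same right/down DP recurrence on an inline 3x3
# grid (illustrative addition with toy data, not the Project Euler matrix.txt
# input): the cheapest path through [[1, 3, 1], [1, 5, 1], [4, 2, 1]] is
# 1 -> 3 -> 1 -> 1 -> 1 = 7.
def _min_path_sum(grid: list[list[int]]) -> int:
    n = len(grid[0])
    dp = [[0] * n for _ in grid]
    dp[0][0] = grid[0][0]
    for i in range(1, n):
        dp[0][i] = grid[0][i] + dp[0][i - 1]
    for i in range(1, len(grid)):
        dp[i][0] = grid[i][0] + dp[i - 1][0]
    for i in range(1, len(grid)):
        for j in range(1, n):
            dp[i][j] = grid[i][j] + min(dp[i - 1][j], dp[i][j - 1])
    return dp[-1][-1]


assert _min_path_sum([[1, 3, 1], [1, 5, 1], [4, 2, 1]]) == 7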
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
execute_subprocess_async,
require_cpu,
require_huggingface_suite,
require_multi_gpu,
require_single_gpu,
)
from accelerate.utils import patch_environment
@require_huggingface_suite
class MetricTester(unittest.TestCase):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ["scripts", "external_deps", "test_metrics.py"]
        )

        from accelerate.test_utils.scripts.external_deps import test_metrics  # noqa: F401

        self.test_metrics = test_metrics

    @require_cpu
    def test_metric_cpu_noop(self):
        debug_launcher(self.test_metrics.main, num_processes=1)

    @require_cpu
    def test_metric_cpu_multi(self):
        debug_launcher(self.test_metrics.main)

    @require_single_gpu
    def test_metric_gpu(self):
        self.test_metrics.main()

    @require_multi_gpu
    def test_metric_gpu_multi(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class LayoutXLMProcessor(ProcessorMixin):
    r"""
    Constructs a LayoutXLM processor which combines a LayoutLMv2 image processor and a LayoutXLM tokenizer into a
    single processor.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv2ImageProcessor"
    tokenizer_class = ("LayoutXLMTokenizer", "LayoutXLMTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None,
        boxes: Union[List[List[int]], List[List[List[int]]]] = None,
        word_labels: Optional[Union[List[int], List[List[int]]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes "
                "if you initialized the image processor with apply_ocr set to True."
            )

        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True."
            )

        if return_overflowing_tokens is True and return_offsets_mapping is False:
            raise ValueError("You cannot return overflowing tokens without returning the offsets mapping.")

        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)

        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]

        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["words"],
            text_pair=text_pair if text_pair is not None else None,
            boxes=boxes if boxes is not None else features["boxes"],
            word_labels=word_labels,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )

        # add pixel values
        images = features.pop("pixel_values")
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images, encoded_inputs["overflow_to_sample_mapping"])
        encoded_inputs["image"] = images

        return encoded_inputs

    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])

        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                f" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}"
            )

        return images_with_overflow

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "bbox", "attention_mask", "image"]

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
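
if __name__ == "__main__":
    # A minimal usage sketch (illustrative addition, not part of the original
    # module). The checkpoint name and image path are assumptions for
    # demonstration, and apply_ocr=True requires Tesseract to be installed.
    from PIL import Image

    from transformers import LayoutLMv2ImageProcessor, LayoutXLMTokenizerFast

    processor = LayoutXLMProcessor(
        image_processor=LayoutLMv2ImageProcessor(apply_ocr=True),
        tokenizer=LayoutXLMTokenizerFast.from_pretrained("microsoft/layoutxlm-base"),
    )
    encoding = processor(Image.open("document.png").convert("RGB"), return_tensors="pt")
    print(sorted(encoding.keys()))  # attention_mask, bbox, image, input_ids, ...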
"""simple docstring"""
from __future__ import annotations
import time
from math import sqrt
# 1 for manhattan, 0 for euclidean
# 1 for manhattan, 0 for euclidean
HEURISTIC = 0

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right

TPosition = tuple[int, int]


class Node:
    def __init__(
        self,
        pos_x: int,
        pos_y: int,
        goal_x: int,
        goal_y: int,
        g_cost: int,
        parent: Node | None,
    ) -> None:
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.h_cost = self.calculate_heuristic()
        self.f_cost = self.g_cost + self.h_cost

    def calculate_heuristic(self) -> float:
        dx = self.pos_x - self.goal_x
        dy = self.pos_y - self.goal_y
        if HEURISTIC == 1:
            return abs(dx) + abs(dy)
        else:
            return sqrt(dy**2 + dx**2)

    def __lt__(self, other: Node) -> bool:
        return self.f_cost < other.f_cost


class AStar:
    def __init__(self, start: TPosition, goal: TPosition) -> None:
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99999, None)

        self.open_nodes = [self.start]
        self.closed_nodes: list[Node] = []

        self.reached = False

    def search(self) -> list[TPosition]:
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)

            if current_node.pos == self.target.pos:
                return self.retrace_path(current_node)

            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)

            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue

                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))

                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)
        return [self.start.pos]

    def get_successors(self, parent: Node) -> list[Node]:
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue

            if grid[pos_y][pos_x] != 0:
                continue

            successors.append(
                Node(
                    pos_x,
                    pos_y,
                    self.target.pos_y,
                    self.target.pos_x,
                    parent.g_cost + 1,
                    parent,
                )
            )
        return successors

    def retrace_path(self, node: Node | None) -> list[TPosition]:
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path


class BidirectionalAStar:
    def __init__(self, start: TPosition, goal: TPosition) -> None:
        self.fwd_astar = AStar(start, goal)
        self.bwd_astar = AStar(goal, start)
        self.reached = False

    def search(self) -> list[TPosition]:
        while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
            self.fwd_astar.open_nodes.sort()
            self.bwd_astar.open_nodes.sort()
            current_fwd_node = self.fwd_astar.open_nodes.pop(0)
            current_bwd_node = self.bwd_astar.open_nodes.pop(0)

            if current_bwd_node.pos == current_fwd_node.pos:
                return self.retrace_bidirectional_path(current_fwd_node, current_bwd_node)

            self.fwd_astar.closed_nodes.append(current_fwd_node)
            self.bwd_astar.closed_nodes.append(current_bwd_node)

            self.fwd_astar.target = current_bwd_node
            self.bwd_astar.target = current_fwd_node

            successors = {
                self.fwd_astar: self.fwd_astar.get_successors(current_fwd_node),
                self.bwd_astar: self.bwd_astar.get_successors(current_bwd_node),
            }

            for astar in [self.fwd_astar, self.bwd_astar]:
                for child_node in successors[astar]:
                    if child_node in astar.closed_nodes:
                        continue

                    if child_node not in astar.open_nodes:
                        astar.open_nodes.append(child_node)
                    else:
                        # retrieve the best current path
                        better_node = astar.open_nodes.pop(astar.open_nodes.index(child_node))

                        if child_node.g_cost < better_node.g_cost:
                            astar.open_nodes.append(child_node)
                        else:
                            astar.open_nodes.append(better_node)
        return [self.fwd_astar.start.pos]

    def retrace_bidirectional_path(self, fwd_node: Node, bwd_node: Node) -> list[TPosition]:
        fwd_path = self.fwd_astar.retrace_path(fwd_node)
        bwd_path = self.bwd_astar.retrace_path(bwd_node)
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path


if __name__ == "__main__":
    # all coordinates are given in format [y,x]
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    start_time = time.time()
    a_star = AStar(init, goal)
    path = a_star.search()
    end_time = time.time() - start_time
    print(f"AStar execution time = {end_time:f} seconds")

    bd_start_time = time.time()
    bidir_astar = BidirectionalAStar(init, goal)
    path = bidir_astar.search()
    bd_end_time = time.time() - bd_start_time
    print(f"BidirectionalAStar execution time = {bd_end_time:f} seconds")
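
# A tiny numeric check of the heuristic (illustrative addition): with the
# default HEURISTIC = 0, the estimated cost from (y=2, x=3) to the origin is
# the Euclidean distance sqrt(2**2 + 3**2) = sqrt(13).
assert abs(Node(3, 2, 0, 0, 0, None).h_cost - sqrt(13)) < 1e-9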
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json",
    # See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}


class GPTNeoXConfig(PretrainedConfig):
    model_type = "gpt_neox"

    def __init__(
        self,
        vocab_size=50432,
        hidden_size=6144,
        num_hidden_layers=44,
        num_attention_heads=64,
        intermediate_size=24576,
        hidden_act="gelu",
        rotary_pct=0.25,
        rotary_emb_base=10000,
        attention_dropout=0.0,
        hidden_dropout=0.0,
        classifier_dropout=0.1,
        max_position_embeddings=2048,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        use_cache=True,
        bos_token_id=0,
        eos_token_id=2,
        tie_word_embeddings=False,
        use_parallel_residual=True,
        rope_scaling=None,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
        self.classifier_dropout = classifier_dropout
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.use_parallel_residual = use_parallel_residual
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        if self.hidden_size % self.num_attention_heads != 0:
            raise ValueError(
                "The hidden size is not divisble by the number of attention heads! Make sure to update them!"
            )

    def _rope_scaling_validation(self):
        """Validate the `rope_scaling` configuration."""
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}")
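
if __name__ == "__main__":
    # A small sketch (illustrative addition, not part of the original file) of
    # a `rope_scaling` value that passes `_rope_scaling_validation`: exactly
    # two fields, a known type, and a float factor greater than 1.
    config = GPTNeoXConfig(rope_scaling={"type": "linear", "factor": 2.0})
    print(config.rope_scaling)  # {'type': 'linear', 'factor': 2.0}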
'''simple docstring'''
import unittest
import torch
from diffusers import DDIMScheduler, DDPMScheduler, UNet2DModel
from diffusers.training_utils import set_seed
from diffusers.utils.testing_utils import slow
torch.backends.cuda.matmul.allow_tf32 = False


class TrainingTests(unittest.TestCase):
    def get_model_optimizer(self, resolution=32):
        set_seed(0)
        model = UNet2DModel(sample_size=resolution, in_channels=3, out_channels=3)
        optimizer = torch.optim.SGD(model.parameters(), lr=0.0001)
        return model, optimizer

    @slow
    def test_training_step_equality(self):
        device = "cpu"  # ensure full determinism without setting the CUBLAS_WORKSPACE_CONFIG env variable
        ddpm_scheduler = DDPMScheduler(
            num_train_timesteps=1000,
            beta_start=0.0001,
            beta_end=0.02,
            beta_schedule="linear",
            clip_sample=True,
        )
        ddim_scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_start=0.0001,
            beta_end=0.02,
            beta_schedule="linear",
            clip_sample=True,
        )
        assert ddpm_scheduler.config.num_train_timesteps == ddim_scheduler.config.num_train_timesteps

        # shared batches for DDPM and DDIM
        set_seed(0)
        clean_images = [torch.randn((4, 3, 32, 32)).clip(-1, 1).to(device) for _ in range(4)]
        noise = [torch.randn((4, 3, 32, 32)).to(device) for _ in range(4)]
        timesteps = [torch.randint(0, 1000, (4,)).long().to(device) for _ in range(4)]

        # train with a DDPM scheduler
        model, optimizer = self.get_model_optimizer(resolution=32)
        model.train().to(device)
        for i in range(4):
            optimizer.zero_grad()
            ddpm_noisy_images = ddpm_scheduler.add_noise(clean_images[i], noise[i], timesteps[i])
            ddpm_noise_pred = model(ddpm_noisy_images, timesteps[i]).sample
            loss = torch.nn.functional.mse_loss(ddpm_noise_pred, noise[i])
            loss.backward()
            optimizer.step()
        del model, optimizer

        # recreate the model and optimizer, and retry with DDIM
        model, optimizer = self.get_model_optimizer(resolution=32)
        model.train().to(device)
        for i in range(4):
            optimizer.zero_grad()
            ddim_noisy_images = ddim_scheduler.add_noise(clean_images[i], noise[i], timesteps[i])
            ddim_noise_pred = model(ddim_noisy_images, timesteps[i]).sample
            loss = torch.nn.functional.mse_loss(ddim_noise_pred, noise[i])
            loss.backward()
            optimizer.step()
        del model, optimizer

        self.assertTrue(torch.allclose(ddpm_noisy_images, ddim_noisy_images, atol=1e-5))
        self.assertTrue(torch.allclose(ddpm_noise_pred, ddim_noise_pred, atol=1e-5))
'''simple docstring'''
def apply_table(inp, table):
    """Apply the permutation table to the input bit string (1-indexed)."""
    res = ""
    for i in table:
        res += inp[i - 1]
    return res


def left_shift(data):
    """Circularly left-shift the bit string by one position."""
    return data[1:] + data[0]


def xor(a, b):
    """XOR two bit strings of equal length."""
    res = ""
    for i in range(len(b)):
        if a[i] == b[i]:
            res += "0"
        else:
            res += "1"
    return res


def apply_sbox(s, data):
    row = int("0b" + data[0] + data[-1], 2)
    col = int("0b" + data[1:3], 2)
    return bin(s[row][col])[2:]


def function(expansion, s0, s1, key, message):
    left = message[:4]
    right = message[4:]
    temp = apply_table(right, expansion)
    temp = xor(temp, key)
    l = apply_sbox(s0, temp[:4])  # noqa: E741
    r = apply_sbox(s1, temp[4:])
    l = "0" * (2 - len(l)) + l  # noqa: E741
    r = "0" * (2 - len(r)) + r
    temp = apply_table(l + r, p4_table)
    temp = xor(left, temp)
    return temp + right


if __name__ == "__main__":
    key = input("Enter 10 bit key: ")
    message = input("Enter 8 bit message: ")

    p8_table = [6, 3, 7, 4, 8, 5, 10, 9]
    p10_table = [3, 5, 2, 7, 4, 10, 1, 9, 8, 6]
    p4_table = [2, 4, 3, 1]
    IP = [2, 6, 3, 1, 4, 8, 5, 7]
    IP_inv = [4, 1, 3, 5, 7, 2, 8, 6]
    expansion = [4, 1, 2, 3, 2, 3, 4, 1]
    s0 = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
    s1 = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]

    # key generation
    temp = apply_table(key, p10_table)
    left = temp[:5]
    right = temp[5:]
    left = left_shift(left)
    right = left_shift(right)
    key1 = apply_table(left + right, p8_table)
    left = left_shift(left)
    right = left_shift(right)
    left = left_shift(left)
    right = left_shift(right)
    key2 = apply_table(left + right, p8_table)

    # encryption
    temp = apply_table(message, IP)
    temp = function(expansion, s0, s1, key1, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, s0, s1, key2, temp)
    CT = apply_table(temp, IP_inv)
    print("Cipher text is:", CT)

    # decryption
    temp = apply_table(CT, IP)
    temp = function(expansion, s0, s1, key2, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, s0, s1, key1, temp)
    PT = apply_table(temp, IP_inv)
    print("Plain text after decypting is:", PT)
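
# A tiny self-contained check of the bit-string primitives above (illustrative
# addition with toy inputs, not part of the original script): `apply_table`
# permutes with 1-indexed positions, `left_shift` rotates by one position, and
# `xor` combines two bit strings bitwise.
assert apply_table("1010", [2, 4, 3, 1]) == "0011"
assert left_shift("1010") == "0101"
assert xor("1010", "0110") == "1100"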
from __future__ import annotations
class Node:
    def __init__(self, data=None):
        self.data = data
        self.next = None

    def __repr__(self):
        string_rep = []
        temp = self
        while temp:
            string_rep.append(f"{temp.data}")
            temp = temp.next
        return "->".join(string_rep)


def make_linked_list(elements_list: list):
    """Creates a Linked List from the elements of the given sequence
    (list/tuple) and returns the head of the Linked List."""
    if not elements_list:
        raise Exception("The Elements List is empty")

    current = head = Node(elements_list[0])
    for i in range(1, len(elements_list)):
        current.next = Node(elements_list[i])
        current = current.next
    return head


def print_reverse(head_node: Node):
    """Prints the elements of the given Linked List in reverse order."""
    if head_node is not None and isinstance(head_node, Node):
        print_reverse(head_node.next)
        print(head_node.data)


def main():
    from doctest import testmod

    testmod()
    linked_list = make_linked_list([14, 52, 14, 12, 43])
    print("Linked List:")
    print(linked_list)
    print("Elements in Reverse:")
    print_reverse(linked_list)


if __name__ == "__main__":
    main()
import numpy as np
from cv2 import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filter2D, imread, imshow, waitKey


def gabor_filter_kernel(
    ksize: int, sigma: int, theta: int, lambd: int, gamma: int, psi: int
) -> np.ndarray:
    # prepare kernel
    # the kernel size have to be odd
    if (ksize % 2) == 0:
        ksize = ksize + 1
    gabor = np.zeros((ksize, ksize), dtype=np.float64)

    # each value
    for y in range(ksize):
        for x in range(ksize):
            # distance from center
            px = x - ksize // 2
            py = y - ksize // 2

            # degree to radiant
            _theta = theta / 180 * np.pi
            cos_theta = np.cos(_theta)
            sin_theta = np.sin(_theta)

            # get kernel x
            _x = cos_theta * px + sin_theta * py

            # get kernel y
            _y = -sin_theta * px + cos_theta * py

            # fill kernel
            gabor[y, x] = np.exp(
                -(_x**2 + gamma**2 * _y**2) / (2 * sigma**2)
            ) * np.cos(2 * np.pi * _x / lambd + psi)

    return gabor


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # read original image
    img = imread("../image_data/lena.jpg")
    # turn image in gray scale value
    gray = cvtColor(img, COLOR_BGR2GRAY)

    # Apply multiple Kernel to detect edges
    out = np.zeros(gray.shape[:2])
    for theta in [0, 30, 60, 90, 120, 150]:
        kernel_10 = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
        out += filter2D(gray, CV_8UC3, kernel_10)
    out = out / out.max() * 255
    out = out.astype(np.uint8)

    imshow("Original", gray)
    imshow("Gabor filter with 20x20 mask and 6 directions", out)

    waitKey(0)
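
# A quick shape check of the kernel helper (illustrative addition): an even
# ksize is bumped to the next odd value, so ksize=4 yields a 5x5 kernel.
assert gabor_filter_kernel(4, 8, 0, 10, 0, 0).shape == (5, 5)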
'''simple docstring'''
import os
from distutils.util import strtobool
def get_int_from_env(env_keys, default):
    """Returns the first positive env value found in the `env_keys` list or the default."""
    for e in env_keys:
        val = int(os.environ.get(e, -1))
        if val >= 0:
            return val
    return default


def parse_flag_from_env(key, default=False):
    """Returns truthy value for `key` from the env if available else the default."""
    value = os.environ.get(key, str(default))
    return strtobool(value) == 1  # As its name indicates `strtobool` actually returns an int...


def parse_choice_from_env(key, default="no"):
    value = os.environ.get(key, str(default))
    return value
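
if __name__ == "__main__":
    # A minimal usage sketch (illustrative addition, not part of the original
    # module); the environment variable names below are assumptions chosen for
    # demonstration.
    os.environ.setdefault("WORLD_SIZE", "4")
    os.environ.setdefault("USE_FP16", "1")
    print(get_int_from_env(["WORLD_SIZE", "SLURM_NTASKS"], 1))  # 4
    print(parse_flag_from_env("USE_FP16"))  # True
    print(parse_choice_from_env("MIXED_PRECISION"))  # "no"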
'''simple docstring'''
import argparse
import torch
from transformers import GPT2LMHeadModel, RobertaForMaskedLM
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description=(
            "Extraction some layers of the full RobertaForMaskedLM or GPT2LMHeadModel for Transfer Learned"
            " Distillation"
        )
    )
    parser.add_argument("--model_type", default="roberta", choices=["roberta", "gpt2"])
    parser.add_argument("--model_name", default="roberta-large", type=str)
    parser.add_argument("--dump_checkpoint", default="serialization_dir/tf_roberta_048131723.pth", type=str)
    parser.add_argument("--vocab_transform", action="store_true")
    args = parser.parse_args()

    if args.model_type == "roberta":
        model = RobertaForMaskedLM.from_pretrained(args.model_name)
        prefix = "roberta"
    elif args.model_type == "gpt2":
        model = GPT2LMHeadModel.from_pretrained(args.model_name)
        prefix = "transformer"

    state_dict = model.state_dict()
    compressed_sd = {}

    # Embeddings #
    if args.model_type == "gpt2":
        for param_name in ["wte.weight", "wpe.weight"]:
            compressed_sd[param_name] = state_dict[f"{prefix}.{param_name}"]
    else:
        for w in ["word_embeddings", "position_embeddings", "token_type_embeddings"]:
            param_name = f"{prefix}.embeddings.{w}.weight"
            compressed_sd[param_name] = state_dict[param_name]
        for w in ["weight", "bias"]:
            param_name = f"{prefix}.embeddings.LayerNorm.{w}"
            compressed_sd[param_name] = state_dict[param_name]

    # Transformer Blocks #
    std_idx = 0
    for teacher_idx in [0, 2, 4, 7, 9, 11]:
        if args.model_type == "gpt2":
            for layer in ["ln_1", "attn.c_attn", "attn.c_proj", "ln_2", "mlp.c_fc", "mlp.c_proj"]:
                for w in ["weight", "bias"]:
                    compressed_sd[f"h.{std_idx}.{layer}.{w}"] = state_dict[
                        f"{prefix}.h.{teacher_idx}.{layer}.{w}"
                    ]
            compressed_sd[f"h.{std_idx}.attn.bias"] = state_dict[f"{prefix}.h.{teacher_idx}.attn.bias"]
        else:
            for layer in [
                "attention.self.query",
                "attention.self.key",
                "attention.self.value",
                "attention.output.dense",
                "attention.output.LayerNorm",
                "intermediate.dense",
                "output.dense",
                "output.LayerNorm",
            ]:
                for w in ["weight", "bias"]:
                    compressed_sd[f"{prefix}.encoder.layer.{std_idx}.{layer}.{w}"] = state_dict[
                        f"{prefix}.encoder.layer.{teacher_idx}.{layer}.{w}"
                    ]
        std_idx += 1

    # Language Modeling Head ###s
    if args.model_type == "roberta":
        for layer in ["lm_head.decoder.weight", "lm_head.bias"]:
            compressed_sd[f"{layer}"] = state_dict[f"{layer}"]
        if args.vocab_transform:
            for w in ["weight", "bias"]:
                compressed_sd[f"lm_head.dense.{w}"] = state_dict[f"lm_head.dense.{w}"]
                compressed_sd[f"lm_head.layer_norm.{w}"] = state_dict[f"lm_head.layer_norm.{w}"]
    elif args.model_type == "gpt2":
        for w in ["weight", "bias"]:
            compressed_sd[f"{prefix}.ln_f.{w}"] = state_dict[f"{prefix}.ln_f.{w}"]
        compressed_sd["lm_head.weight"] = state_dict["lm_head.weight"]

    print(f"N layers selected for distillation: {std_idx}")
    print(f"Number of params transferred for distillation: {len(compressed_sd.keys())}")

    print(f"Save transferred checkpoint to {args.dump_checkpoint}.")
    torch.save(compressed_sd, args.dump_checkpoint)
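
# Example invocation (illustrative addition; the script name and output path
# are placeholders, not taken from the original file):
#
#   python extract.py --model_type roberta --model_name roberta-large \
#       --dump_checkpoint serialization_dir/roberta_6L.pth --vocab_transform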