import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class LiltModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=24,
        num_hidden_layers=2,
        num_attention_heads=6,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        range_bbox=1000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
        self.range_bbox = range_bbox

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        bbox = ids_tensor([self.batch_size, self.seq_length, 4], self.range_bbox)
        # Ensure that bbox is legal: each box must satisfy x1 <= x2 and y1 <= y2
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t

        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = self.get_config()

        return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels

    def get_config(self):
        return LiltConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LiltModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        config.num_labels = self.num_labels
        model = LiltForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LiltForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_torch
class LiltModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            LiltModel,
            LiltForSequenceClassification,
            LiltForTokenClassification,
            LiltForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": LiltModel,
            "question-answering": LiltForQuestionAnswering,
            "text-classification": LiltForSequenceClassification,
            "token-classification": LiltForTokenClassification,
            "zero-shot": LiltForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        return True

    def setUp(self):
        self.model_tester = LiltModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LiltConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LiltModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_torch
@slow
class LiltModelIntegrationTest(unittest.TestCase):
    def test_inference_no_head(self):
        model = LiltModel.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base").to(torch_device)

        input_ids = torch.tensor([[1, 2]], device=torch_device)
        bbox = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]], device=torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(input_ids=input_ids, bbox=bbox)

        expected_shape = torch.Size([1, 2, 768])
        expected_slice = torch.tensor(
            [[-0.0653, 0.0950, -0.0061], [-0.0545, 0.0926, -0.0324]],
            device=torch_device,
        )

        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3], expected_slice, atol=1e-3))
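# How these tests are typically run (the file path and pytest usage are
# assumptions, not stated in this file):
#   pytest tests/models/lilt/test_modeling_lilt.py -k "test_model or test_config"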
# ---------------------------------------------------------------------------
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SWITCH_TRANSFORMERS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/switch-base-8": "https://huggingface.co/google/switch-base-8/blob/main/config.json",
}
class SwitchTransformersConfig(PretrainedConfig):
    model_type = "switch_transformers"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}

    def __init__(
        self,
        vocab_size=32128,
        d_model=768,
        d_kv=64,
        d_ff=2048,
        expert_capacity=64,
        num_layers=12,
        num_sparse_encoder_layers=3,
        num_decoder_layers=12,
        num_sparse_decoder_layers=3,
        num_heads=12,
        num_experts=8,
        router_bias=False,
        router_jitter_noise=0.01,
        router_dtype="float32",
        router_ignore_padding_tokens=False,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        router_z_loss_coef=0.001,
        router_aux_loss_coef=0.001,
        initializer_factor=1.0,
        feed_forward_proj="relu",
        is_encoder_decoder=True,
        add_router_probs=False,
        use_cache=True,
        pad_token_id=0,
        eos_token_id=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff

        self.num_sparse_encoder_layers = num_sparse_encoder_layers

        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_sparse_decoder_layers = num_sparse_decoder_layers

        # This tells us how many encoder layers apart each sparse layer sits.
        if self.num_sparse_encoder_layers > 0:
            self.encoder_sparse_step = self.num_layers // self.num_sparse_encoder_layers
        else:
            self.encoder_sparse_step = self.num_layers  # HACK: this will create 0 sparse layers

        # Likewise for the decoder.
        if self.num_sparse_decoder_layers > 0:
            self.decoder_sparse_step = self.num_decoder_layers // self.num_sparse_decoder_layers
        else:
            self.decoder_sparse_step = self.num_decoder_layers  # HACK: this will create 0 sparse layers

        self.num_heads = num_heads
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        self.router_jitter_noise = router_jitter_noise
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(f"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}")
        self.router_dtype = router_dtype

        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance

        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache
        self.add_router_probs = add_router_probs

        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef

        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"

        if len(act_info) > 2 or (len(act_info) > 1 and act_info[0] != "gated"):
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer. "
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'"
            )

        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"

        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            **kwargs,
        )
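# Minimal usage sketch (assumed, not part of the original file): with the
# defaults of 12 encoder layers and 3 sparse encoder layers, every
# 12 // 3 = 4th encoder layer is a sparse (mixture-of-experts) layer.
#   config = SwitchTransformersConfig()
#   config.encoder_sparse_step  # -> 4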
# ---------------------------------------------------------------------------
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CVT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/cvt-13": "https://huggingface.co/microsoft/cvt-13/resolve/main/config.json",
    # See all Cvt models at https://huggingface.co/models?filter=cvt
}
class CvtConfig(PretrainedConfig):
    """Configuration class for the CvT model."""

    model_type = "cvt"

    def __init__(
        self,
        num_channels=3,
        patch_sizes=[7, 3, 3],
        patch_stride=[4, 2, 2],
        patch_padding=[2, 1, 1],
        embed_dim=[64, 192, 384],
        num_heads=[1, 3, 6],
        depth=[1, 2, 10],
        mlp_ratio=[4.0, 4.0, 4.0],
        attention_drop_rate=[0.0, 0.0, 0.0],
        drop_rate=[0.0, 0.0, 0.0],
        drop_path_rate=[0.0, 0.0, 0.1],
        qkv_bias=[True, True, True],
        cls_token=[False, False, True],
        qkv_projection_method=["dw_bn", "dw_bn", "dw_bn"],
        kernel_qkv=[3, 3, 3],
        padding_kv=[1, 1, 1],
        stride_kv=[2, 2, 2],
        padding_q=[1, 1, 1],
        stride_q=[1, 1, 1],
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.depth = depth
        self.mlp_ratio = mlp_ratio
        self.attention_drop_rate = attention_drop_rate
        self.drop_rate = drop_rate
        self.drop_path_rate = drop_path_rate
        self.qkv_bias = qkv_bias
        self.cls_token = cls_token
        self.qkv_projection_method = qkv_projection_method
        self.kernel_qkv = kernel_qkv
        self.padding_kv = padding_kv
        self.stride_kv = stride_kv
        self.padding_q = padding_q
        self.stride_q = stride_q
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
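# Minimal usage sketch (assumed): every argument above is a per-stage list, so
# a three-stage CvT-13-like model widens from 64 to 384 embedding channels:
#   config = CvtConfig(embed_dim=[64, 192, 384], depth=[1, 2, 10])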
# ---------------------------------------------------------------------------
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from .config import config_command_parser
from .config_args import default_config_file, load_config_from_file # noqa: F401
from .default import default_command_parser
from .update import update_command_parser
def get_config_parser(subparsers=None):
    parent_parser = argparse.ArgumentParser(add_help=False, allow_abbrev=False)
    # The main config parser
    config_parser = config_command_parser(subparsers)
    # The subparser to add commands to
    subcommands = config_parser.add_subparsers(title="subcommands", dest="subcommand")

    # Then add other parsers with the parent parser
    default_command_parser(subcommands, parents=[parent_parser])
    update_command_parser(subcommands, parents=[parent_parser])

    return config_parser


def main():
    config_parser = get_config_parser()
    args = config_parser.parse_args()

    if not hasattr(args, "func"):
        config_parser.print_help()
        exit(1)

    # Run
    args.func(args)
if __name__ == "__main__":
main()
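# Typical invocations via the accelerate CLI (assuming accelerate is
# installed); `default` and `update` are the subcommands wired up above:
#   accelerate config            # interactive questionnaire
#   accelerate config default    # write a default config file
#   accelerate config update     # migrate an existing config file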
# ---------------------------------------------------------------------------
from math import isclose, sqrt
def next_point(
    point_x: float, point_y: float, incoming_gradient: float
) -> tuple[float, float, float]:
    # The ellipse is 4x^2 + y^2 = 100; implicit differentiation gives the
    # tangent gradient -4x/y, so the normal gradient at (x, y) is y / (4x).
    normal_gradient = point_y / 4 / point_x
    # Reflect the incoming gradient about the normal using the double-angle
    # identities sin(2a) = 2t/(1+t^2) and cos(2a) = (1-t^2)/(1+t^2), where
    # t is the normal gradient.
    sa = 2 * normal_gradient / (1 + normal_gradient * normal_gradient)
    ca = (1 - normal_gradient * normal_gradient) / (1 + normal_gradient * normal_gradient)
    outgoing_gradient = (sa - ca * incoming_gradient) / (ca + sa * incoming_gradient)

    # to find the next point, solve the simultaneous equations:
    # y^2 + 4x^2 = 100
    # y - b = m * (x - a)
    # ==> A x^2 + B x + C = 0
    quadratic_term = outgoing_gradient**2 + 4
    linear_term = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x)
    constant_term = (point_y - outgoing_gradient * point_x) ** 2 - 100

    x_minus = (
        -linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term)
    ) / (2 * quadratic_term)
    x_plus = (
        -linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term)
    ) / (2 * quadratic_term)

    # two solutions, one of which is our input point
    next_x = x_minus if isclose(x_minus, point_x) else x_plus
    next_y = point_y + outgoing_gradient * (next_x - point_x)

    return next_x, next_y, outgoing_gradient


def solution(first_x_coord: float = 1.4, first_y_coord: float = -9.6) -> int:
    num_reflections: int = 0
    point_x: float = first_x_coord
    point_y: float = first_y_coord
    # The beam enters through the gap at (0.0, 10.1)
    gradient: float = (10.1 - point_y) / (0.0 - point_x)

    while not (-0.01 <= point_x <= 0.01 and point_y > 0):
        point_x, point_y, gradient = next_point(point_x, point_y, gradient)
        num_reflections += 1

    return num_reflections


if __name__ == "__main__":
    print(f"{solution() = }")
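# With the default entry beam, solution() counts 354 reflections, matching the
# published answer to Project Euler problem 144.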
# ---------------------------------------------------------------------------
import math
import unittest
def is_prime(number: int) -> bool:
    """Checks whether a number is prime in O(sqrt(n)) time."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must be an int and positive"

    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, and all multiples of 3 are not primes
        return False

    # All primes > 3 are of the form 6k +/- 1: any integer is 6k + r with
    # r in {0,...,5}, and 6k, 6k+2, 6k+4 are even while 6k+3 is divisible by 3.
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


class Test(unittest.TestCase):
    def test_primes(self):
        self.assertTrue(is_prime(2))
        self.assertTrue(is_prime(3))
        self.assertTrue(is_prime(5))
        self.assertTrue(is_prime(7))
        self.assertTrue(is_prime(11))
        self.assertTrue(is_prime(13))
        self.assertTrue(is_prime(17))
        self.assertTrue(is_prime(19))
        self.assertTrue(is_prime(23))
        self.assertTrue(is_prime(29))

    def test_not_primes(self):
        with self.assertRaises(AssertionError):
            is_prime(-19)
        self.assertFalse(
            is_prime(0),
            "Zero doesn't have any positive factors, primes must have exactly two.",
        )
        self.assertFalse(
            is_prime(1),
            "One only has 1 positive factor, primes must have exactly two.",
        )
        self.assertFalse(is_prime(2 * 2))
        self.assertFalse(is_prime(2 * 3))
        self.assertFalse(is_prime(3 * 3))
        self.assertFalse(is_prime(3 * 5))
        self.assertFalse(is_prime(3 * 5 * 7))
if __name__ == "__main__":
unittest.main()
# ---------------------------------------------------------------------------
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class DDPMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDPMScheduler,)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "variance_type": "fixed_small",
            "clip_sample": True,
        }

        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_variance_type(self):
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True,
                    prediction_type=prediction_type,
                    sample_max_value=threshold,
                )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [0, 500, 999]:
            self.check_over_forward(time_step=t)

    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.02)) < 1e-5

    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 258.9606) < 1e-2
        assert abs(result_mean.item() - 0.3372) < 1e-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 202.0296) < 1e-2
        assert abs(result_mean.item() - 0.2631) < 1e-3

    def test_custom_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]

        scheduler.set_timesteps(timesteps=timesteps)

        scheduler_timesteps = scheduler.timesteps

        for i, timestep in enumerate(scheduler_timesteps):
            if i == len(scheduler_timesteps) - 1:
                expected_prev_t = -1
            else:
                expected_prev_t = timesteps[i + 1]

            prev_t = scheduler.previous_timestep(timestep)
            prev_t = prev_t.item()

            self.assertEqual(prev_t, expected_prev_t)

    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 51, 0]

        with self.assertRaises(ValueError, msg="`custom_timesteps` must be in descending order."):
            scheduler.set_timesteps(timesteps=timesteps)

    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]
        num_inference_steps = len(timesteps)

        with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `custom_timesteps`."):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)

    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [scheduler.config.num_train_timesteps]

        with self.assertRaises(
            ValueError,
            msg=f"`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}",
        ):
            scheduler.set_timesteps(timesteps=timesteps)
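# The full-loop tests above exercise the standard DDPM sampling pattern,
# roughly (a sketch with an assumed noise-prediction `model`, not part of the
# test file):
#   scheduler = DDPMScheduler(num_train_timesteps=1000)
#   sample = torch.randn(1, 3, 32, 32)
#   for t in scheduler.timesteps:
#       residual = model(sample, t)
#       sample = scheduler.step(residual, t, sample).prev_sample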
# ---------------------------------------------------------------------------
from __future__ import annotations
def fractional_knapsack(
    value: list[int], weight: list[int], capacity: int
) -> tuple[float, list[float]]:
    """Greedy fractional-knapsack solver: take items in decreasing
    value-to-weight ratio, splitting the last item if needed."""
    index = list(range(len(value)))
    ratio = [v / w for v, w in zip(value, weight)]
    index.sort(key=lambda i: ratio[i], reverse=True)

    max_value: float = 0
    fractions: list[float] = [0] * len(value)
    for i in index:
        if weight[i] <= capacity:
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break

    return max_value, fractions
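# Hand-checked example (not from the original file): with values [60, 100, 120],
# weights [10, 20, 30] and capacity 50, the greedy choice takes items 0 and 1
# whole plus 2/3 of item 2:
#   fractional_knapsack([60, 100, 120], [10, 20, 30], 50)
#   -> (240.0, [1, 1, 0.6666666666666666])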
if __name__ == "__main__":
import doctest
doctest.testmod()
# ---------------------------------------------------------------------------
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/mbart-large-en-ro": (
            "https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model"
        ),
        "facebook/mbart-large-cc25": (
            "https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/mbart-large-en-ro": 1024,
    "facebook/mbart-large-cc25": 1024,
}

# fmt: off
FAIRSEQ_LANGUAGE_CODES = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN"]
# fmt: on
class MBartTokenizer(PreTrainedTokenizer):
    """Construct an MBART tokenizer, based on SentencePiece."""

    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        tokenizer_file=None,
        src_lang=None,
        tgt_lang=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        additional_special_tokens=None,
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            tokenizer_file=tokenizer_file,
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file

        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'

        # Mimic fairseq token-to-id alignment for the first 4 tokens
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}

        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1

        self.sp_model_size = len(self.sp_model)
        self.lang_code_to_id = {
            code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(FAIRSEQ_LANGUAGE_CODES)
        }
        self.id_to_lang_code = {v: k for k, v in self.lang_code_to_id.items()}
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset

        self.fairseq_tokens_to_ids.update(self.lang_code_to_id)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
        self._additional_special_tokens = list(self.lang_code_to_id.keys())

        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            self._additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in self._additional_special_tokens]
            )

        self._src_lang = src_lang if src_lang is not None else "en_XX"
        self.cur_lang_code_id = self.lang_code_to_id[self._src_lang]
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    @property
    def vocab_size(self):
        return len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset + 1  # Plus 1 for the mask token

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1] * len(self.suffix_tokens)
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _build_translation_inputs(self, raw_inputs, return_tensors, src_lang, tgt_lang, **extra_kwargs):
        """Used by the translation pipeline to prepare inputs for generate()."""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings) to a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        src_lang: str = "en_XX",
        tgt_texts: Optional[List[str]] = None,
        tgt_lang: str = "ro_RO",
        **kwargs,
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang) -> None:
        """Reset the special tokens to the source lang setting. No prefix; suffix = [eos, src_lang_code]."""
        self.cur_lang_code = self.lang_code_to_id[src_lang]
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]

    def set_tgt_lang_special_tokens(self, lang: str) -> None:
        """Reset the special tokens to the target lang setting. No prefix; suffix = [eos, tgt_lang_code]."""
        self.cur_lang_code = self.lang_code_to_id[lang]
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
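# Minimal usage sketch (checkpoint names come from the map above; the example
# sentence is an assumption):
#   tokenizer = MBartTokenizer.from_pretrained(
#       "facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO"
#   )
#   batch = tokenizer("UN Chief Says There Is No Military Solution in Syria", return_tensors="pt")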
# ---------------------------------------------------------------------------
from scipy.stats import pearsonr
import datasets
_DESCRIPTION = """
Pearson correlation coefficient and p-value for testing non-correlation.
The Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.
The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.
"""

_KWARGS_DESCRIPTION = """
Args:
    predictions (`list` of `int`): Predicted class labels, as returned by a model.
    references (`list` of `int`): Ground truth labels.
    return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.

Returns:
    pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.
    p-value (`float`): P-value, which roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.

Examples:

    Example 1-A simple example using only predictions and references.
        >>> pearsonr_metric = datasets.load_metric("pearsonr")
        >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])
        >>> print(round(results['pearsonr'], 2))
        -0.74

    Example 2-The same as Example 1, but that also returns the `p-value`.
        >>> pearsonr_metric = datasets.load_metric("pearsonr")
        >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)
        >>> print(sorted(list(results.keys())))
        ['p-value', 'pearsonr']
        >>> print(round(results['pearsonr'], 2))
        -0.74
        >>> print(round(results['p-value'], 2))
        0.15
"""

_CITATION = """
@article{2020SciPy-NMeth,
author  = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
           Haberland, Matt and Reddy, Tyler and Cournapeau, David and
           Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
           Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
           Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
           Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
           Kern, Robert and Larson, Eric and Carey, C J and
           Polat, Ilhan and Feng, Yu and Moore, Eric W. and
           {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
           Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
           Harris, Charles R. and Archibald, Anne M. and
           Ribeiro, Antonio H. and Pedregosa, Fabian and
           {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title   = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
           Computing in Python}},
journal = {Nature Methods},
year    = {2020},
volume  = {17},
pages   = {261--272},
adsurl  = {https://rdcu.be/b08Wh},
doi     = {10.1038/s41592-019-0686-2},
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Pearsonr(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("float"),
                    "references": datasets.Value("float"),
                }
            ),
            reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html"],
        )

    def _compute(self, predictions, references, return_pvalue=False):
        if return_pvalue:
            results = pearsonr(references, predictions)
            return {"pearsonr": results[0], "p-value": results[1]}
        else:
            return {"pearsonr": float(pearsonr(references, predictions)[0])}
# ---------------------------------------------------------------------------
class EditDistance:
    """
    Computes the Levenshtein edit distance between two strings, both
    top-down (memoized recursion) and bottom-up (tabulation).
    """

    def __init__(self):
        self.word1 = ""
        self.word2 = ""
        self.dp = []

    def __min_dist_top_down_dp(self, m: int, n: int) -> int:
        if m == -1:
            return n + 1
        elif n == -1:
            return m + 1
        elif self.dp[m][n] > -1:
            return self.dp[m][n]
        else:
            if self.word1[m] == self.word2[n]:
                self.dp[m][n] = self.__min_dist_top_down_dp(m - 1, n - 1)
            else:
                insert = self.__min_dist_top_down_dp(m, n - 1)
                delete = self.__min_dist_top_down_dp(m - 1, n)
                replace = self.__min_dist_top_down_dp(m - 1, n - 1)
                self.dp[m][n] = 1 + min(insert, delete, replace)

            return self.dp[m][n]

    def min_dist_top_down(self, word1: str, word2: str) -> int:
        self.word1 = word1
        self.word2 = word2
        self.dp = [[-1 for _ in range(len(word2))] for _ in range(len(word1))]

        return self.__min_dist_top_down_dp(len(word1) - 1, len(word2) - 1)

    def min_dist_bottom_up(self, word1: str, word2: str) -> int:
        self.word1 = word1
        self.word2 = word2
        m = len(word1)
        n = len(word2)
        self.dp = [[0 for _ in range(n + 1)] for _ in range(m + 1)]
        for i in range(m + 1):
            for j in range(n + 1):
                if i == 0:  # first string is empty
                    self.dp[i][j] = j
                elif j == 0:  # second string is empty
                    self.dp[i][j] = i
                elif word1[i - 1] == word2[j - 1]:  # last characters are equal
                    self.dp[i][j] = self.dp[i - 1][j - 1]
                else:
                    insert = self.dp[i][j - 1]
                    delete = self.dp[i - 1][j]
                    replace = self.dp[i - 1][j - 1]
                    self.dp[i][j] = 1 + min(insert, delete, replace)
        return self.dp[m][n]


if __name__ == "__main__":
    solver = EditDistance()

    print("****************** Testing Edit Distance DP Algorithm ******************")
    print()

    S1 = input("Enter the first string: ").strip()
    S2 = input("Enter the second string: ").strip()

    print()
    print(f"The minimum edit distance is: {solver.min_dist_top_down(S1, S2)}")
    print(f"The minimum edit distance is: {solver.min_dist_bottom_up(S1, S2)}")
    print()
    print("*************** End of Testing Edit Distance DP Algorithm ***************")
# ---------------------------------------------------------------------------
def solution(numerator: int = 1, digit: int = 1000) -> int:
    """
    Returns the divisor d in [numerator, digit] whose long-division remainder
    sequence for numerator/d is longest before a remainder repeats, i.e. the
    d whose decimal fraction part has the longest recurring cycle.
    """
    the_digit = 1
    longest_list_length = 0

    for divide_by_number in range(numerator, digit + 1):
        has_been_divided: list[int] = []
        now_divide = numerator
        for _ in range(1, digit + 1):
            if now_divide in has_been_divided:
                if longest_list_length < len(has_been_divided):
                    longest_list_length = len(has_been_divided)
                    the_digit = divide_by_number
            else:
                has_been_divided.append(now_divide)
                now_divide = now_divide * 10 % divide_by_number

    return the_digit
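# With the defaults (numerator=1, digit=1000) this evaluates to 983, matching
# the published answer to Project Euler problem 26.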
# Tests
if __name__ == "__main__":
import doctest
doctest.testmod()
# ---------------------------------------------------------------------------
import itertools
import random
import unittest
import numpy as np
from transformers import BatchFeature, SpeechTaFeatureExtractor
from transformers.testing_utils import require_torch
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor as a nested list."""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
@require_torch
class SpeechTaFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=1,
        padding_value=0.0,
        sampling_rate=16000,
        do_normalize=True,
        num_mel_bins=80,
        hop_length=16,
        win_length=64,
        win_function="hann_window",
        fmin=80,
        fmax=7600,
        mel_floor=1e-10,
        return_attention_mask=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.do_normalize = do_normalize
        self.num_mel_bins = num_mel_bins
        self.hop_length = hop_length
        self.win_length = win_length
        self.win_function = win_function
        self.fmin = fmin
        self.fmax = fmax
        self.mel_floor = mel_floor
        self.return_attention_mask = return_attention_mask

    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "do_normalize": self.do_normalize,
            "num_mel_bins": self.num_mel_bins,
            "hop_length": self.hop_length,
            "win_length": self.win_length,
            "win_function": self.win_function,
            "fmin": self.fmin,
            "fmax": self.fmax,
            "mel_floor": self.mel_floor,
            "return_attention_mask": self.return_attention_mask,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length))
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size)))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]

        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]

        return speech_inputs

    def prepare_inputs_for_target(self, equal_length=False, numpify=False):
        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.num_mel_bins)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.num_mel_bins))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]

        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]

        return speech_inputs
@require_torch
class _lowercase ( lowerCAmelCase, unittest.TestCase ):
"""simple docstring"""
__A = SpeechTaFeatureExtractor
def UpperCamelCase_ (self ):
"""simple docstring"""
a = SpeechTaFeatureExtractionTester(self )
def UpperCamelCase_ (self , lowerCamelCase_ ):
"""simple docstring"""
self.assertTrue(np.all(np.mean(lowerCamelCase_ , axis=0 ) < 1E-3 ) )
self.assertTrue(np.all(np.abs(np.var(lowerCamelCase_ , axis=0 ) - 1 ) < 1E-3 ) )
def UpperCamelCase_ (self ):
"""simple docstring"""
a = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
a = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
a = [np.asarray(lowerCamelCase_ ) for speech_input in speech_inputs]
# Test not batched input
a = feat_extract(speech_inputs[0] , return_tensors="np" ).input_values
a = feat_extract(np_speech_inputs[0] , return_tensors="np" ).input_values
self.assertTrue(np.allclose(lowerCamelCase_ , lowerCamelCase_ , atol=1E-3 ) )
# Test batched
a = feat_extract(lowerCamelCase_ , return_tensors="np" ).input_values
a = feat_extract(lowerCamelCase_ , return_tensors="np" ).input_values
for enc_seq_a, enc_seq_a in zip(lowerCamelCase_ , lowerCamelCase_ ):
self.assertTrue(np.allclose(lowerCamelCase_ , lowerCamelCase_ , atol=1E-3 ) )
def UpperCamelCase_ (self ):
"""simple docstring"""
a = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
a = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
a = ["longest", "max_length", "do_not_pad"]
a = [None, 1600, None]
for max_length, padding in zip(lowerCamelCase_ , lowerCamelCase_ ):
a = feat_extract(lowerCamelCase_ , padding=lowerCamelCase_ , max_length=lowerCamelCase_ , return_tensors="np" )
a = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:800] )
self.assertTrue(input_values[0][800:].sum() < 1E-6 )
self._check_zero_mean_unit_variance(input_values[1][:1000] )
self.assertTrue(input_values[0][1000:].sum() < 1E-6 )
self._check_zero_mean_unit_variance(input_values[2][:1200] )
def UpperCamelCase_ (self ):
"""simple docstring"""
a = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
a = range(800 , 1400 , 200 )
a = [floats_list((1, x) )[0] for x in lengths]
a = ["longest", "max_length", "do_not_pad"]
a = [None, 1600, None]
for max_length, padding in zip(lowerCamelCase_ , lowerCamelCase_ ):
a = feat_extract(lowerCamelCase_ , max_length=lowerCamelCase_ , padding=lowerCamelCase_ )
a = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:800] )
self._check_zero_mean_unit_variance(input_values[1][:1000] )
self._check_zero_mean_unit_variance(input_values[2][:1200] )
def UpperCamelCase_ (self ):
"""simple docstring"""
a = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
a = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
a = feat_extract(
lowerCamelCase_ , truncation=lowerCamelCase_ , max_length=1000 , padding="max_length" , return_tensors="np" )
a = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1] )
self._check_zero_mean_unit_variance(input_values[2] )
def UpperCamelCase_ (self ):
"""simple docstring"""
a = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
a = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
a = feat_extract(
lowerCamelCase_ , truncation=lowerCamelCase_ , max_length=1000 , padding="longest" , return_tensors="np" )
a = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1, :1000] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertTrue(input_values.shape == (3, 1000) )
a = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
a = feat_extract(
lowerCamelCase_ , truncation=lowerCamelCase_ , max_length=2000 , padding="longest" , return_tensors="np" )
a = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1, :1000] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length > longest -> then pad to longest
self.assertTrue(input_values.shape == (3, 1200) )
def UpperCamelCase_ (self ):
"""simple docstring"""
a = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
a = np.random.rand(100 ).astype(np.floataa )
a = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
a = feature_extractor.pad([{"input_values": inputs}] , return_tensors="np" )
self.assertTrue(np_processed.input_values.dtype == np.floataa )
a = feature_extractor.pad([{"input_values": inputs}] , return_tensors="pt" )
self.assertTrue(pt_processed.input_values.dtype == torch.floataa )
def UpperCamelCase_ (self ):
"""simple docstring"""
a = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
a = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
a = [np.asarray(lowerCamelCase_ ) for speech_input in speech_inputs]
# Test feature size
a = feature_extractor(audio_target=lowerCamelCase_ , padding=lowerCamelCase_ , return_tensors="np" ).input_values
self.assertTrue(input_values.ndim == 3 )
self.assertTrue(input_values.shape[-1] == feature_extractor.num_mel_bins )
# Test not batched input
a = feature_extractor(speech_inputs[0] , return_tensors="np" ).input_values
a = feature_extractor(np_speech_inputs[0] , return_tensors="np" ).input_values
self.assertTrue(np.allclose(lowerCamelCase_ , lowerCamelCase_ , atol=1E-3 ) )
# Test batched
a = feature_extractor(lowerCamelCase_ , return_tensors="np" ).input_values
a = feature_extractor(lowerCamelCase_ , return_tensors="np" ).input_values
for enc_seq_a, enc_seq_a in zip(lowerCamelCase_ , lowerCamelCase_ ):
self.assertTrue(np.allclose(lowerCamelCase_ , lowerCamelCase_ , atol=1E-3 ) )
# Test 2-D numpy arrays are batched.
a = [floats_list((1, x) )[0] for x in (800, 800, 800)]
a = np.asarray(lowerCamelCase_ )
a = feature_extractor(lowerCamelCase_ , return_tensors="np" ).input_values
a = feature_extractor(lowerCamelCase_ , return_tensors="np" ).input_values
for enc_seq_a, enc_seq_a in zip(lowerCamelCase_ , lowerCamelCase_ ):
self.assertTrue(np.allclose(lowerCamelCase_ , lowerCamelCase_ , atol=1E-3 ) )
def UpperCamelCase_ (self ):
"""simple docstring"""
a = self.feat_extract_tester.prepare_inputs_for_target()
a = self.feature_extraction_class(**self.feat_extract_dict )
a = feat_extract.model_input_names[0]
a = BatchFeature({input_name: speech_inputs} )
self.assertTrue(all(len(lowerCamelCase_ ) == len(lowerCamelCase_ ) for x, y in zip(lowerCamelCase_ , processed_features[input_name] ) ) )
a = self.feat_extract_tester.prepare_inputs_for_target(equal_length=lowerCamelCase_ )
a = BatchFeature({input_name: speech_inputs} , tensor_type="np" )
a = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
a = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) )
@require_torch
def UpperCamelCase_ (self ):
"""simple docstring"""
a = self.feat_extract_tester.prepare_inputs_for_target(equal_length=lowerCamelCase_ )
a = self.feature_extraction_class(**self.feat_extract_dict )
a = feat_extract.model_input_names[0]
a = BatchFeature({input_name: speech_inputs} , tensor_type="pt" )
a = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
a = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) )
@require_torch
def UpperCamelCase_ (self ):
"""simple docstring"""
a = self.feature_extraction_class(**self.feat_extract_dict )
a = self.feat_extract_tester.prepare_inputs_for_target()
a = feat_extract.model_input_names[0]
a = BatchFeature({input_name: speech_inputs} )
a = feat_extract.num_mel_bins # hack!
a = feat_extract.pad(lowerCamelCase_ , padding="longest" , return_tensors="np" )[input_name]
a = feat_extract.pad(lowerCamelCase_ , padding="longest" , return_tensors="pt" )[input_name]
self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_pt.numpy().astype(np.floataa ).sum() ) < 1E-2 )
    def test_attention_mask_target(self):
        """simple docstring"""
        feat_dict = self.feat_extract_dict
        feat_dict["return_attention_mask"] = True
        feat_extract = self.feature_extraction_class(**feat_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target()
        input_lengths = [len(x) for x in speech_inputs]
        input_name = feat_extract.model_input_names[0]
        processed = BatchFeature({input_name: speech_inputs})
        feat_extract.feature_size = feat_extract.num_mel_bins  # hack!
        processed = feat_extract.pad(processed, padding="longest", return_tensors="np")
        self.assertIn("attention_mask", processed)
        self.assertListEqual(list(processed.attention_mask.shape), list(processed[input_name].shape[:2]))
        self.assertListEqual(processed.attention_mask.sum(-1).tolist(), input_lengths)
    def test_attention_mask_with_truncation_target(self):
        """simple docstring"""
        feat_dict = self.feat_extract_dict
        feat_dict["return_attention_mask"] = True
        feat_extract = self.feature_extraction_class(**feat_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target()
        input_lengths = [len(x) for x in speech_inputs]
        input_name = feat_extract.model_input_names[0]
        processed = BatchFeature({input_name: speech_inputs})
        max_length = min(input_lengths)
        feat_extract.feature_size = feat_extract.num_mel_bins  # hack!
        processed_pad = feat_extract.pad(
            processed, padding="max_length", max_length=max_length, truncation=True, return_tensors="np")
        self.assertIn("attention_mask", processed_pad)
        self.assertListEqual(
            list(processed_pad.attention_mask.shape), [processed_pad[input_name].shape[0], max_length])
        self.assertListEqual(
            processed_pad.attention_mask[:, :max_length].sum(-1).tolist(), [max_length for x in speech_inputs])
    def _load_datasamples(self, num_samples):
        """simple docstring"""
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]
        return [x["array"] for x in speech_samples]
    def test_integration(self):
        """simple docstring"""
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
            [2.3804e-03, 2.0752e-03, 1.9836e-03, 2.1057e-03, 1.6174e-03,
             3.0518e-04, 9.1553e-05, 3.3569e-04, 9.7656e-04, 1.8311e-03,
             2.0142e-03, 2.1057e-03, 1.7395e-03, 4.5776e-04, -3.9673e-04,
             4.5776e-04, 1.0071e-03, 9.1553e-05, 4.8828e-04, 1.1597e-03,
             7.3242e-04, 9.4604e-04, 1.8005e-03, 1.8311e-03, 8.8501e-04,
             4.2725e-04, 4.8828e-04, 7.3242e-04, 1.0986e-03, 2.1057e-03])
        # fmt: on
        input_speech = self._load_datasamples(1)
        feature_extractor = SpeechTaFeatureExtractor()
        input_values = feature_extractor(input_speech, return_tensors="pt").input_values
        self.assertEqual(input_values.shape, (1, 93680))
        self.assertTrue(torch.allclose(input_values[0, :30], EXPECTED_INPUT_VALUES, atol=1e-6))
    def test_integration_target(self):
        """simple docstring"""
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
            [-2.6870, -3.0104, -3.1356, -3.5352, -3.0044, -3.0353, -3.4719, -3.6777,
             -3.1520, -2.9435, -2.6553, -2.8795, -2.9944, -2.5921, -3.0279, -3.0386,
             -3.0864, -3.1291, -3.2353, -2.7444, -2.6831, -2.7287, -3.1761, -3.1571,
             -3.2726, -3.0582, -3.1007, -3.4533, -3.4695, -3.0998])
        # fmt: on
        input_speech = self._load_datasamples(1)
        feature_extractor = SpeechTaFeatureExtractor()
        input_values = feature_extractor(audio_target=input_speech, return_tensors="pt").input_values
        self.assertEqual(input_values.shape, (1, 366, 80))
        self.assertTrue(torch.allclose(input_values[0, 0, :30], EXPECTED_INPUT_VALUES, atol=1e-4))
| 227
|
from collections import defaultdict
from typing import Optional
from ..image_utils import load_image
from ..utils import (
add_end_docstrings,
is_torch_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class MaskGenerationPipeline(ChunkPipeline):
"""simple docstring"""
    def __init__(self, **kwargs):
        """simple docstring"""
        super().__init__(**kwargs)
requires_backends(self , "vision" )
requires_backends(self , "torch" )
if self.framework != "pt":
raise ValueError(F'''The {self.__class__} is only available in PyTorch.''' )
        self.check_model_type(MODEL_FOR_MASK_GENERATION_MAPPING)
    def _sanitize_parameters(self, **kwargs):
        """simple docstring"""
        preprocess_kwargs = {}
        postprocess_kwargs = {}
        forward_params = {}
        # preprocess args
        if "points_per_batch" in kwargs:
            preprocess_kwargs["points_per_batch"] = kwargs["points_per_batch"]
        if "points_per_crop" in kwargs:
            preprocess_kwargs["points_per_crop"] = kwargs["points_per_crop"]
        if "crops_n_layers" in kwargs:
            preprocess_kwargs["crops_n_layers"] = kwargs["crops_n_layers"]
        if "crop_overlap_ratio" in kwargs:
            preprocess_kwargs["crop_overlap_ratio"] = kwargs["crop_overlap_ratio"]
        if "crop_n_points_downscale_factor" in kwargs:
            preprocess_kwargs["crop_n_points_downscale_factor"] = kwargs["crop_n_points_downscale_factor"]
        # forward args
        if "pred_iou_thresh" in kwargs:
            forward_params["pred_iou_thresh"] = kwargs["pred_iou_thresh"]
        if "stability_score_offset" in kwargs:
            forward_params["stability_score_offset"] = kwargs["stability_score_offset"]
        if "mask_threshold" in kwargs:
            forward_params["mask_threshold"] = kwargs["mask_threshold"]
        if "stability_score_thresh" in kwargs:
            forward_params["stability_score_thresh"] = kwargs["stability_score_thresh"]
        # postprocess args
        if "crops_nms_thresh" in kwargs:
            postprocess_kwargs["crops_nms_thresh"] = kwargs["crops_nms_thresh"]
        if "output_rle_mask" in kwargs:
            postprocess_kwargs["output_rle_mask"] = kwargs["output_rle_mask"]
        if "output_bboxes_mask" in kwargs:
            postprocess_kwargs["output_bboxes_mask"] = kwargs["output_bboxes_mask"]
        return preprocess_kwargs, forward_params, postprocess_kwargs
    def __call__(self, image, *args, num_workers=None, batch_size=None, **kwargs):
        """simple docstring"""
        return super().__call__(image, *args, num_workers=num_workers, batch_size=batch_size, **kwargs)
    def preprocess(self, image, points_per_batch=64, crops_n_layers = 0, crop_overlap_ratio = 512 / 1500, points_per_crop = 32, crop_n_points_downscale_factor = 1, ):
        """simple docstring"""
        image = load_image(image)
        target_size = self.image_processor.size["longest_edge"]
        crop_boxes, grid_points, cropped_images, input_labels = self.image_processor.generate_crop_boxes(
            image, target_size, crops_n_layers, crop_overlap_ratio, points_per_crop, crop_n_points_downscale_factor)
        model_inputs = self.image_processor(images=cropped_images, return_tensors="pt")
        with self.device_placement():
            if self.framework == "pt":
                inference_context = self.get_inference_context()
                with inference_context():
                    model_inputs = self._ensure_tensor_on_device(model_inputs, device=self.device)
                    image_embeddings = self.model.get_image_embeddings(model_inputs.pop("pixel_values"))
                    model_inputs["image_embeddings"] = image_embeddings
        n_points = grid_points.shape[1]
        points_per_batch = points_per_batch if points_per_batch is not None else n_points
        if points_per_batch <= 0:
            raise ValueError(
                "Cannot have points_per_batch<=0. Must be >=1 to returned batched outputs. "
                "To return all points at once, set points_per_batch to None")
        for i in range(0, n_points, points_per_batch):
            batched_points = grid_points[:, i : i + points_per_batch, :, :]
            labels = input_labels[:, i : i + points_per_batch]
            is_last = i == n_points - points_per_batch
yield {
"input_points": batched_points,
"input_labels": labels,
"input_boxes": crop_boxes,
"is_last": is_last,
**model_inputs,
}
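        # Illustrative note (added, not from the original source): the loop above
        # makes preprocess a generator, so each yielded dict is one chunk of at
        # most points_per_batch point prompts sharing the same precomputed image
        # embeddings. A minimal stand-alone analogue of the chunking, assuming a
        # (1, n, 1, 2) point grid:
        #
        #   def chunk_grid(grid, size):
        #       n = grid.shape[1]
        #       for i in range(0, n, size):
        #           yield grid[:, i : i + size], i == n - size  # (chunk, is_last)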
    def _forward(self, model_inputs, pred_iou_thresh=0.88, stability_score_thresh=0.95, mask_threshold=0, stability_score_offset=1, ):
        """simple docstring"""
        input_boxes = model_inputs.pop("input_boxes")
        is_last = model_inputs.pop("is_last")
        original_sizes = model_inputs.pop("original_sizes").tolist()
        reshaped_input_sizes = model_inputs.pop("reshaped_input_sizes").tolist()
        model_outputs = self.model(**model_inputs)
        # post processing happens here in order to avoid CPU GPU copies of ALL the masks
        low_resolution_masks = model_outputs["pred_masks"]
        masks = self.image_processor.post_process_masks(
            low_resolution_masks, original_sizes, reshaped_input_sizes, mask_threshold, binarize=False)
        iou_scores = model_outputs["iou_scores"]
        masks, iou_scores, boxes = self.image_processor.filter_masks(
            masks[0], iou_scores[0], original_sizes[0], input_boxes[0], pred_iou_thresh, stability_score_thresh, mask_threshold, stability_score_offset, )
return {
"masks": masks,
"is_last": is_last,
"boxes": boxes,
"iou_scores": iou_scores,
}
    def postprocess(self, model_outputs, output_rle_mask=False, output_bboxes_mask=False, crops_nms_thresh=0.7, ):
        """simple docstring"""
        all_scores = []
        all_masks = []
        all_boxes = []
        for model_output in model_outputs:
            all_scores.append(model_output.pop("iou_scores"))
            all_masks.extend(model_output.pop("masks"))
            all_boxes.append(model_output.pop("boxes"))
        all_scores = torch.cat(all_scores)
        all_boxes = torch.cat(all_boxes)
        output_masks, iou_scores, rle_mask, bounding_boxes = self.image_processor.post_process_for_mask_generation(
            all_masks, all_scores, all_boxes, crops_nms_thresh)
        extra = defaultdict(list)
        for output in model_outputs:
            for k, v in output.items():
                extra[k].append(v)
        optional = {}
        if output_rle_mask:
            optional["rle_mask"] = rle_mask
        if output_bboxes_mask:
            optional["bounding_boxes"] = bounding_boxes
return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
| 227
| 1
|
'''simple docstring'''
from abc import ABC, abstractmethod
from typing import Optional, Union
from .. import Dataset, DatasetDict, Features, IterableDataset, IterableDatasetDict, NamedSplit
from ..utils.typing import NestedDataStructureLike, PathLike
class AbstractDatasetReader(ABC):
    '''simple docstring'''

    def __init__( self , path_or_paths = None , split = None , features = None , cache_dir = None , keep_in_memory = False , streaming = False , num_proc = None , **kwargs , ) -> None:
        self.path_or_paths = path_or_paths
        self.split = split if split or isinstance(path_or_paths, dict) else "train"
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs

    @abstractmethod
    def read(self) -> Union[Dataset, DatasetDict, IterableDataset, IterableDatasetDict]:
        pass
class AbstractDatasetInputStream(ABC):
    '''simple docstring'''

    def __init__( self , features = None , cache_dir = None , keep_in_memory = False , streaming = False , num_proc = None , **kwargs , ) -> None:
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs

    @abstractmethod
    def read(self) -> Union[Dataset, IterableDataset]:
        pass
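# Design note (added): a reader captures *where* the data lives (path_or_paths,
# split) while an input stream only captures *how* to materialize it; both defer
# all work to read(), which concrete subclasses implement. The csv/json/parquet
# reader modules mentioned here are an assumption based on the package layout,
# not imports made by this file.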
| 352
|
'''simple docstring'''
import numpy
class TwoHiddenLayerNeuralNetwork:
    '''simple docstring'''

    def __init__( self , input_array , output_array ) -> None:
        self.input_array = input_array
        # Random initial weights are assigned where first argument is the
        # number of nodes in previous layer and second argument is the
        # number of nodes in the next layer.
        # self.input_array.shape[1] is used to represent number of nodes in input layer.
        # First hidden layer consists of 4 nodes.
        self.input_layer_and_first_hidden_layer_weights = numpy.random.rand(
            self.input_array.shape[1], 4)
        # Random initial values for the first hidden layer.
        # First hidden layer has 4 nodes.
        # Second hidden layer has 3 nodes.
        self.first_hidden_layer_and_second_hidden_layer_weights = numpy.random.rand(
            4, 3)
        # Random initial values for the second hidden layer.
        # Second hidden layer has 3 nodes.
        # Output layer has 1 node.
        self.second_hidden_layer_and_output_layer_weights = numpy.random.rand(3, 1)
        # Real output values provided.
        self.output_array = output_array
        # Predicted output values by the neural network.
        # Predicted_output array initially consists of zeroes.
        self.predicted_output = numpy.zeros(output_array.shape)
    def feedforward(self) -> numpy.ndarray:
        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.input_array, self.input_layer_and_first_hidden_layer_weights))
        # layer_between_first_hidden_layer_and_second_hidden_layer is the layer
        # connecting the first hidden set of nodes with the second hidden set of nodes.
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer,
                self.first_hidden_layer_and_second_hidden_layer_weights,
            ))
        # layer_between_second_hidden_layer_and_output is the layer connecting
        # second hidden layer with the output node.
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer,
                self.second_hidden_layer_and_output_layer_weights,
            ))
        return self.layer_between_second_hidden_layer_and_output
    def back_propagation(self) -> None:
        # Gradient of the loss w.r.t. each weight matrix, via the chain rule,
        # working backwards from the output layer to the input layer.
        updated_second_hidden_layer_and_output_layer_weights = numpy.dot(
            self.layer_between_first_hidden_layer_and_second_hidden_layer.T,
            2
            * (self.output_array - self.predicted_output)
            * sigmoid_derivative(self.predicted_output),
        )
        updated_first_hidden_layer_and_second_hidden_layer_weights = numpy.dot(
            self.layer_between_input_and_first_hidden_layer.T,
            numpy.dot(
                2
                * (self.output_array - self.predicted_output)
                * sigmoid_derivative(self.predicted_output),
                self.second_hidden_layer_and_output_layer_weights.T,
            )
            * sigmoid_derivative(
                self.layer_between_first_hidden_layer_and_second_hidden_layer),
        )
        updated_input_layer_and_first_hidden_layer_weights = numpy.dot(
            self.input_array.T,
            numpy.dot(
                numpy.dot(
                    2
                    * (self.output_array - self.predicted_output)
                    * sigmoid_derivative(self.predicted_output),
                    self.second_hidden_layer_and_output_layer_weights.T,
                )
                * sigmoid_derivative(
                    self.layer_between_first_hidden_layer_and_second_hidden_layer),
                self.first_hidden_layer_and_second_hidden_layer_weights.T,
            )
            * sigmoid_derivative(self.layer_between_input_and_first_hidden_layer),
        )
        self.input_layer_and_first_hidden_layer_weights += (
            updated_input_layer_and_first_hidden_layer_weights
        )
        self.first_hidden_layer_and_second_hidden_layer_weights += (
            updated_first_hidden_layer_and_second_hidden_layer_weights
        )
        self.second_hidden_layer_and_output_layer_weights += (
            updated_second_hidden_layer_and_output_layer_weights
        )
    def train(self, output, iterations, give_loss) -> None:
        for iteration in range(1, iterations + 1):
            self.predicted_output = self.feedforward()
            self.back_propagation()
            if give_loss:
                loss = numpy.mean(numpy.square(output - self.feedforward()))
                print(f'Iteration {iteration} Loss: {loss}')

    def predict(self, input_arr) -> int:
        self.array = input_arr
        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.array, self.input_layer_and_first_hidden_layer_weights))
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer,
                self.first_hidden_layer_and_second_hidden_layer_weights,
            ))
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer,
                self.second_hidden_layer_and_output_layer_weights,
            ))
        return int(self.layer_between_second_hidden_layer_and_output > 0.6)
def sigmoid(value: numpy.ndarray) -> numpy.ndarray:
    """simple docstring"""
    return 1 / (1 + numpy.exp(-value))


def sigmoid_derivative(value: numpy.ndarray) -> numpy.ndarray:
    """simple docstring"""
    return (value) * (1 - (value))
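# Note (added for clarity): sigmoid_derivative expects the *activation*
# sigma(x) as its argument, not x itself, since d/dx sigma(x) = sigma(x) * (1 - sigma(x)).
# feedforward() stores the activations, so back_propagation() can reuse them directly.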
def example() -> int:
    """simple docstring"""
    test_input = numpy.array(
        (
            [0, 0, 0],
            [0, 0, 1],
            [0, 1, 0],
            [0, 1, 1],
            [1, 0, 0],
            [1, 0, 1],
            [1, 1, 0],
            [1, 1, 1],
        ),
        dtype=numpy.float64,
    )
    # True output values for the given input values.
    output = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]), dtype=numpy.float64)
    # Calling neural network class.
    neural_network = TwoHiddenLayerNeuralNetwork(
        input_array=test_input, output_array=output)
    # Calling training function.
    # Set give_loss to True if you want to see loss in every iteration.
    neural_network.train(output=output, iterations=10, give_loss=False)
    return neural_network.predict(numpy.array(([1, 1, 1]), dtype=numpy.float64))
if __name__ == "__main__":
example()
| 187
| 0
|
"""simple docstring"""
# Algorithm for the pigeonhole sorting
def pigeonhole_sort(a) -> None:
    min_val = min(a)  # min() finds the minimum value
    max_val = max(a)  # max() finds the maximum value
    size = max_val - min_val + 1  # size is difference of max and min values plus one
    # list of pigeonholes of size equal to the variable size
    holes = [0] * size
    # Populate the pigeonholes.
    for x in a:
        assert isinstance(x, int), "integers only please"
        holes[x - min_val] += 1
    # Putting the elements back into the array in an order.
    i = 0
    for count in range(size):
        while holes[count] > 0:
            holes[count] -= 1
            a[i] = count + min_val
            i += 1
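# Illustrative check (added, not in the original file): pigeonhole sort runs in
# O(n + size) time and is only practical when the value range is small, e.g.
#   data = [8, 3, 2, 7, 4, 6, 8]
#   pigeonhole_sort(data)  # data becomes [2, 3, 4, 6, 7, 8, 8]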
def main() -> None:
    a = [8, 3, 2, 7, 4, 6, 8]
    pigeonhole_sort(a)
    print("Sorted order is:", " ".join(str(x) for x in a))
if __name__ == "__main__":
main()
| 202
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_fnet import FNetTokenizer
else:
    FNetTokenizer = None

logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/spiece.model",
        "google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/spiece.model",
    },
    "tokenizer_file": {
        "google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/tokenizer.json",
        "google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/tokenizer.json",
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/fnet-base": 512,
    "google/fnet-large": 512,
}
SPIECE_UNDERLINE = "▁"
class FNetTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "token_type_ids"]
    slow_tokenizer_class = FNetTokenizer
    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=False , remove_space=True , keep_accents=True , unk_token="<unk>" , sep_token="[SEP]" , pad_token="<pad>" , cls_token="[CLS]" , mask_token="[MASK]" , **kwargs , ):
        # The mask token behaves like a normal word, i.e. it includes the space before it
        # and is included in the raw text; there should be a match in a non-normalized sentence.
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, **kwargs, )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
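# Illustrative usage (added; checkpoint names come from the map above, and the
# output layout is described, not asserted, by this file):
#
#   tokenizer = FNetTokenizerFast.from_pretrained("google/fnet-base")
#   ids = tokenizer("Hello world")["input_ids"]
#   # layout follows build_inputs_with_special_tokens: [CLS] tokens... [SEP]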
| 202
| 1
|
"""simple docstring"""
# flake8: noqa
# Lint as: python3
from typing import Dict, List, Optional, Type
from .. import config
from ..utils import logging
from .formatting import (
ArrowFormatter,
CustomFormatter,
Formatter,
PandasFormatter,
PythonFormatter,
TensorFormatter,
format_table,
query_table,
)
from .np_formatter import NumpyFormatter
logger = logging.get_logger(__name__)

_FORMAT_TYPES: Dict[Optional[str], Type[Formatter]] = {}
_FORMAT_TYPES_ALIASES: Dict[Optional[str], str] = {}
_FORMAT_TYPES_ALIASES_UNAVAILABLE: Dict[Optional[str], Exception] = {}
def _register_formatter(formatter_cls: type, format_type: Optional[str], aliases: Optional[List[str]] = None, ) -> None:
    aliases = aliases if aliases is not None else []
    if format_type in _FORMAT_TYPES:
        logger.warning(
            f'Overwriting format type \'{format_type}\' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})' )
    _FORMAT_TYPES[format_type] = formatter_cls
    for alias in set(aliases + [format_type]):
        if alias in _FORMAT_TYPES_ALIASES:
            logger.warning(
                f'Overwriting format type alias \'{alias}\' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})' )
        _FORMAT_TYPES_ALIASES[alias] = format_type
def _register_unavailable_formatter(unavailable_error: Exception, format_type: Optional[str], aliases: Optional[List[str]] = None) -> None:
    aliases = aliases if aliases is not None else []
    for alias in set(aliases + [format_type]):
        _FORMAT_TYPES_ALIASES_UNAVAILABLE[alias] = unavailable_error
# Here we define all the available formatting functions that can be used by `Dataset.set_format`
_register_formatter(PythonFormatter, None, aliases=['''python'''])
_register_formatter(ArrowFormatter, '''arrow''', aliases=['''pa''', '''pyarrow'''])
_register_formatter(NumpyFormatter, '''numpy''', aliases=['''np'''])
_register_formatter(PandasFormatter, '''pandas''', aliases=['''pd'''])
_register_formatter(CustomFormatter, '''custom''')
if config.TORCH_AVAILABLE:
from .torch_formatter import TorchFormatter
_register_formatter(TorchFormatter, '''torch''', aliases=['''pt''', '''pytorch'''])
else:
    _torch_error = ValueError("PyTorch needs to be installed to be able to return PyTorch tensors.")
_register_unavailable_formatter(_torch_error, '''torch''', aliases=['''pt''', '''pytorch'''])
if config.TF_AVAILABLE:
from .tf_formatter import TFFormatter
_register_formatter(TFFormatter, '''tensorflow''', aliases=['''tf'''])
else:
    _tf_error = ValueError("Tensorflow needs to be installed to be able to return Tensorflow tensors.")
_register_unavailable_formatter(_tf_error, '''tensorflow''', aliases=['''tf'''])
if config.JAX_AVAILABLE:
from .jax_formatter import JaxFormatter
_register_formatter(JaxFormatter, '''jax''', aliases=[])
else:
    _jax_error = ValueError("JAX needs to be installed to be able to return JAX arrays.")
_register_unavailable_formatter(_jax_error, '''jax''', aliases=[])
def get_format_type_from_alias(format_type: Optional[str]) -> Optional[str]:
    if format_type in _FORMAT_TYPES_ALIASES:
        return _FORMAT_TYPES_ALIASES[format_type]
    else:
        return format_type
def get_formatter(format_type: Optional[str], **format_kwargs) -> Formatter:
    format_type = get_format_type_from_alias(format_type)
    if format_type in _FORMAT_TYPES:
        return _FORMAT_TYPES[format_type](**format_kwargs)
    if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE:
        raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type]
    else:
        raise ValueError(
            f'Return type should be None or selected in {list(type for type in _FORMAT_TYPES.keys() if type is not None )}, but got \'{format_type}\'' )
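# Example resolution path (added, illustrative): with the registrations above,
# get_formatter("np") maps the "np" alias to "numpy" via
# get_format_type_from_alias and instantiates _FORMAT_TYPES["numpy"], i.e.
# NumpyFormatter(); asking for "torch" without PyTorch installed raises the
# registered _torch_error instead.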
| 312
|
"""simple docstring"""
from ....configuration_utils import PretrainedConfig
from ....utils import logging
logger = logging.get_logger(__name__)

TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''CarlCochet/trajectory-transformer-halfcheetah-medium-v2''': (
'''https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json'''
),
# See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer
}
class TrajectoryTransformerConfig(PretrainedConfig):
    model_type = "trajectory_transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }
    def __init__(self, vocab_size=100, action_weight=5, reward_weight=1, value_weight=1, block_size=249, action_dim=6, observation_dim=17, transition_dim=25, n_layer=4, n_head=4, n_embd=128, embd_pdrop=0.1, attn_pdrop=0.1, resid_pdrop=0.1, learning_rate=0.0006, max_position_embeddings=512, initializer_range=0.02, layer_norm_eps=1e-12, kaiming_initializer_range=1, use_cache=True, pad_token_id=1, bos_token_id=50256, eos_token_id=50256, **kwargs, ) -> None:
        self.vocab_size = vocab_size
        self.action_weight = action_weight
        self.reward_weight = reward_weight
        self.value_weight = value_weight
        self.max_position_embeddings = max_position_embeddings
        self.block_size = block_size
        self.action_dim = action_dim
        self.observation_dim = observation_dim
        self.transition_dim = transition_dim
        self.learning_rate = learning_rate
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_embd = n_embd
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.resid_pdrop = resid_pdrop
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.kaiming_initializer_range = kaiming_initializer_range
        self.use_cache = use_cache
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
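# Sketch (added, illustrative): attribute_map lets the generic config names
# resolve to the GPT-style fields, e.g.
#   config = TrajectoryTransformerConfig(n_layer=2, n_head=2)
#   config.num_hidden_layers  # -> 2, aliased to config.n_layer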
| 312
| 1
|
"""simple docstring"""
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to check the quality of the outcomes.
#
# This version creates a tiny model through reduction of a normal pre-trained model, but keeping the
# full vocab and merges file, thus also resulting in a larger model due to a large vocab size.
# This gives ~3MB in total for all files.
#
# If you want a model ~50 times smaller than this, see `fsmt-make-super-tiny-model.py`, which is slightly more complicated.
#
#
# It will then be used as "stas/tiny-wmt19-en-de"
# Build
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
mname = "facebook/wmt19-en-de"

tokenizer = FSMTTokenizer.from_pretrained(mname)
# get the correct vocab sizes, etc. from the master model
config = FSMTConfig.from_pretrained(mname)
config.update(
dict(
d_model=4,
encoder_layers=1,
decoder_layers=1,
encoder_ffn_dim=4,
decoder_ffn_dim=4,
encoder_attention_heads=1,
decoder_attention_heads=1,
)
)
tiny_model = FSMTForConditionalGeneration(config)
print(f"""num of params {tiny_model.num_parameters()}""")
# Test
batch = tokenizer(["Making tiny model"], return_tensors="pt")
outputs = tiny_model(**batch)
print("test output:", len(outputs.logits[0]))
# Save
mname_tiny = "tiny-wmt19-en-de"
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(f"""Generated {mname_tiny}""")
# Upload
# transformers-cli upload tiny-wmt19-en-de
| 332
|
"""simple docstring"""
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBart50Tokenizer, MBart50TokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
EN_CODE = 250004
RO_CODE = 250020
@require_sentencepiece
@require_tokenizers
class MBart50TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MBart50Tokenizer
    rust_tokenizer_class = MBart50TokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = MBart50Tokenizer(SAMPLE_VOCAB, src_lang="en_XX", tgt_lang="ro_RO", keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        token = "<s>"
        token_id = 0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 1054)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1054)
    def test_full_tokenizer(self):
        tokenizer = MBart50Tokenizer(SAMPLE_VOCAB, src_lang="en_XX", tgt_lang="ro_RO", keep_accents=True)
        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]], )
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens, [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", "."], )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids, [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
            ], )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", "."], )
@slow
    def test_tokenizer_integration(self):
# fmt: off
__UpperCAmelCase = {'''input_ids''': [[25_00_04, 1_10_62, 8_27_72, 7, 15, 8_27_72, 5_38, 5_15_29, 2_37, 1_71_98, 12_90, 2_06, 9, 21_51_75, 13_14, 1_36, 1_71_98, 12_90, 2_06, 9, 5_63_59, 42, 12_20_09, 9, 1_64_66, 16, 8_73_44, 45_37, 9, 47_17, 7_83_81, 6, 15_99_58, 7, 15, 2_44_80, 6_18, 4, 5_27, 2_26_93, 54_28, 4, 27_77, 2_44_80, 98_74, 4, 4_35_23, 5_94, 4, 8_03, 1_83_92, 3_31_89, 18, 4, 4_35_23, 2_44_47, 1_23_99, 1_00, 2_49_55, 8_36_58, 96_26, 14_40_57, 15, 8_39, 2_23_35, 16, 1_36, 2_49_55, 8_36_58, 8_34_79, 15, 3_91_02, 7_24, 16, 6_78, 6_45, 27_89, 13_28, 45_89, 42, 12_20_09, 11_57_74, 23, 8_05, 13_28, 4_68_76, 7, 1_36, 5_38_94, 19_40, 4_22_27, 4_11_59, 1_77_21, 8_23, 4_25, 4, 2_75_12, 9_87_22, 2_06, 1_36, 55_31, 49_70, 9_19, 1_73_36, 5, 2], [25_00_04, 2_00_80, 6_18, 83, 8_27_75, 47, 4_79, 9, 15_17, 73, 5_38_94, 3_33, 8_05_81, 11_01_17, 1_88_11, 52_56, 12_95, 51, 15_25_26, 2_97, 79_86, 3_90, 12_44_16, 5_38, 3_54_31, 2_14, 98, 1_50_44, 2_57_37, 1_36, 71_08, 4_37_01, 23, 7_56, 13_53_55, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [25_00_04, 5_81, 6_37_73, 11_94_55, 6, 14_77_97, 8_82_03, 7, 6_45, 70, 21, 32_85, 1_02_69, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=__UpperCAmelCase, model_name="facebook/mbart-large-50", revision="d3913889c59cd5c9e456b269c376325eabad57e2", )
    def test_save_pretrained(self):
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return
        self.tokenizers_list[0] = (self.rust_tokenizer_class, "hf-internal-testing/tiny-random-mbart50", {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tmpdirname = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname)
                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f)
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)
                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname)
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
                shutil.rmtree(tmpdirname)
                # Save tokenizer rust, legacy_format=True
                tmpdirname = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname, legacy_format=True)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname)
                # Checks it save with the same files
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)
                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname)
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                shutil.rmtree(tmpdirname)
                # Save tokenizer rust, legacy_format=False
                tmpdirname = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname, legacy_format=False)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname)
                # Checks it saved the tokenizer.json file
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname)
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                shutil.rmtree(tmpdirname)
@require_torch
@require_sentencepiece
@require_tokenizers
class MBart50OneToManyIntegrationTest(unittest.TestCase):
    checkpoint_name = "facebook/mbart-large-50-one-to-many-mmt"
    src_text = [
" UN Chief Says There Is No Military Solution in Syria",
" Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.",
]
    tgt_text = [
"Şeful ONU declară că nu există o soluţie militară în Siria",
"Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"
" pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor"
" face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
]
    expected_src_tokens = [EN_CODE, 8274, 127873, 25916, 7, 8622, 2071, 438, 67485, 53, 187895, 23, 51712, 2]
    @classmethod
    def setUpClass(cls):
        cls.tokenizer = MBart50Tokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="en_XX", tgt_lang="ro_RO")
        cls.pad_token_id = 1
        return cls
    def test_language_codes(self):
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ar_AR'''] , 25_00_01 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''en_EN'''] , 25_00_04 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ro_RO'''] , 25_00_20 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''mr_IN'''] , 25_00_38 )
    def test_tokenizer_batch_encode_plus(self):
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)
    def test_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(RO_CODE, self.tokenizer.all_special_ids)
        generated_ids = [RO_CODE, 884, 9019, 96, 9, 916, 86792, 36, 18743, 15596, 5, 2]
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_romanian = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_romanian)
        self.assertNotIn(self.tokenizer.eos_token, result)
    def test_tokenizer_truncation(self):
        src_text = ["this is gunna be a long sentence " * 20]
        assert isinstance(src_text[0], str)
        desired_max_length = 10
        ids = self.tokenizer(src_text, max_length=desired_max_length, truncation=True).input_ids[0]
        self.assertEqual(ids[0], EN_CODE)
        self.assertEqual(ids[-1], 2)
        self.assertEqual(len(ids), desired_max_length)
    def test_mask_token(self):
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "ar_AR"]), [250053, 250001])
    def test_special_tokens_unaffected_by_save_load(self):
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = MBart50Tokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids, original_special_tokens)
@require_torch
    def test_batch_fairseq_parity(self):
        batch = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=True, return_tensors="pt")
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)
        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        assert batch.input_ids[1][0] == EN_CODE
        assert batch.input_ids[1][-1] == 2
        assert batch.labels[1][0] == RO_CODE
        assert batch.labels[1][-1] == 2
        assert batch.decoder_input_ids[1][:2].tolist() == [2, RO_CODE]
@require_torch
    def test_tokenizer_prepare_batch(self):
        batch = self.tokenizer(
            self.src_text, text_target=self.tgt_text, padding=True, truncation=True, max_length=len(self.expected_src_tokens), return_tensors="pt", )
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)
        self.assertIsInstance(batch, BatchEncoding)
        self.assertEqual((2, 14), batch.input_ids.shape)
        self.assertEqual((2, 14), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens, result)
        self.assertEqual(2, batch.decoder_input_ids[0, 0])  # decoder_start_token_id
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens, [EN_CODE])
        self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])
    def test_seq2seq_max_target_length(self):
        batch = self.tokenizer(self.src_text, padding=True, truncation=True, max_length=3, return_tensors="pt")
        targets = self.tokenizer(
            text_target=self.tgt_text, padding=True, truncation=True, max_length=10, return_tensors="pt")
        labels = targets["input_ids"]
        batch["decoder_input_ids"] = shift_tokens_right(labels, self.tokenizer.pad_token_id)
        self.assertEqual(batch.input_ids.shape[1], 3)
        self.assertEqual(batch.decoder_input_ids.shape[1], 10)
@require_torch
    def test_tokenizer_translation(self):
        inputs = self.tokenizer._build_translation_inputs(
            "A test", return_tensors="pt", src_lang="en_XX", tgt_lang="ar_AR")
        self.assertEqual(
            nested_simplify(inputs), {
                # en_XX, A, test, EOS
                "input_ids": [[250004, 62, 3034, 2]],
                "attention_mask": [[1, 1, 1, 1]],
                # ar_AR
                "forced_bos_token_id": 250001,
            }, )
| 332
| 1
|
def topological_sort(graph):
    """simple docstring"""
    indegree = [0] * len(graph)
    queue = []
    topo = []
    cnt = 0
    for values in graph.values():
        for i in values:
            indegree[i] += 1
    for i in range(len(indegree)):
        if indegree[i] == 0:
            queue.append(i)
    while queue:
        vertex = queue.pop(0)
        cnt += 1
        topo.append(vertex)
        for x in graph[vertex]:
            indegree[x] -= 1
            if indegree[x] == 0:
                queue.append(x)
    if cnt != len(graph):
        print("Cycle exists")
    else:
        print(topo)
# Adjacency List of Graph
graph = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
topological_sort(graph)
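# For the adjacency list above, Kahn's algorithm prints [0, 1, 2, 3, 4, 5]
# (added note): vertex 0 is the only zero-indegree start node, and 3 only
# unlocks 4 and 5 after both of its incoming edges (from 1 and 2) are consumed.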
| 358
|
import enum
import os
from hashlib import sha256
from typing import Optional
from .. import config
from .logging import get_logger
logger = get_logger(__name__)


class VerificationMode(enum.Enum):
    '''simple docstring'''

    ALL_CHECKS = "all_checks"
    BASIC_CHECKS = "basic_checks"
    NO_CHECKS = "no_checks"


class ChecksumVerificationException(Exception):
    '''simple docstring'''


class UnexpectedDownloadedFile(ChecksumVerificationException):
    '''simple docstring'''


class ExpectedMoreDownloadedFiles(ChecksumVerificationException):
    '''simple docstring'''


class NonMatchingChecksumError(ChecksumVerificationException):
    '''simple docstring'''
def verify_checksums(expected_checksums, recorded_checksums, verification_name=None) -> None:
    """simple docstring"""
    if expected_checksums is None:
        logger.info("Unable to verify checksums.")
        return
    if len(set(expected_checksums) - set(recorded_checksums)) > 0:
        raise ExpectedMoreDownloadedFiles(str(set(expected_checksums) - set(recorded_checksums)))
    if len(set(recorded_checksums) - set(expected_checksums)) > 0:
        raise UnexpectedDownloadedFile(str(set(recorded_checksums) - set(expected_checksums)))
    bad_urls = [url for url in expected_checksums if expected_checksums[url] != recorded_checksums[url]]
    for_verification_name = " for " + verification_name if verification_name is not None else ""
    if len(bad_urls) > 0:
        raise NonMatchingChecksumError(
            f"""Checksums didn't match{for_verification_name}:\n"""
            f"""{bad_urls}\n"""
            "Set `verification_mode='no_checks'` to skip checksums verification and ignore this error")
    logger.info("All the checksums matched successfully" + for_verification_name)
class SplitsVerificationException(Exception):
    '''simple docstring'''


class UnexpectedSplits(SplitsVerificationException):
    '''simple docstring'''


class ExpectedMoreSplits(SplitsVerificationException):
    '''simple docstring'''


class NonMatchingSplitsSizesError(SplitsVerificationException):
    '''simple docstring'''
def verify_splits(expected_splits, recorded_splits) -> None:
    """simple docstring"""
    if expected_splits is None:
        logger.info("Unable to verify splits sizes.")
        return
    if len(set(expected_splits) - set(recorded_splits)) > 0:
        raise ExpectedMoreSplits(str(set(expected_splits) - set(recorded_splits)))
    if len(set(recorded_splits) - set(expected_splits)) > 0:
        raise UnexpectedSplits(str(set(recorded_splits) - set(expected_splits)))
    bad_splits = [
        {"expected": expected_splits[name], "recorded": recorded_splits[name]}
        for name in expected_splits
        if expected_splits[name].num_examples != recorded_splits[name].num_examples
    ]
    if len(bad_splits) > 0:
        raise NonMatchingSplitsSizesError(str(bad_splits))
    logger.info("All the splits matched successfully.")
def get_size_checksum_dict(path, record_checksum: bool = True) -> dict:
    """simple docstring"""
    if record_checksum:
        m = sha256()
        # hash in 1 MiB chunks so arbitrarily large files never need to fit in memory
        with open(path, "rb") as f:
            for chunk in iter(lambda: f.read(1 << 20), b""):
                m.update(chunk)
        checksum = m.hexdigest()
    else:
        checksum = None
    return {"num_bytes": os.path.getsize(path), "checksum": checksum}
def is_small_dataset(dataset_size) -> bool:
    """simple docstring"""
    if dataset_size and config.IN_MEMORY_MAX_SIZE:
        return dataset_size < config.IN_MEMORY_MAX_SIZE
    else:
        return False
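# Illustrative behaviour (added; the limit value is an assumption, not a default
# from this file): with config.IN_MEMORY_MAX_SIZE = 250 << 20 (250 MiB),
# is_small_dataset(100 << 20) -> True and is_small_dataset(1 << 30) -> False;
# a falsy size or an unset limit always yields False.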
| 273
| 0
|
import random
from .binary_exp_mod import bin_exp_mod
def is_prime_big(n, prec=1_000) -> bool:
    if n < 2:
        return False
    if n % 2 == 0:
        return n == 2
    # this means n is odd
    d = n - 1
    exp = 0
    while d % 2 == 0:
        d /= 2
        exp += 1
    # n - 1 = d * (2**exp)
    count = 0
    while count < prec:
        a = random.randint(2, n - 1)
        b = bin_exp_mod(a, d, n)
        if b != 1:
            flag = True
            for _ in range(exp):
                if b == n - 1:
                    flag = False
                    break
                b = b * b
                b %= n
            if flag:
                return False
        count += 1
    return True
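# Each iteration above is one Miller-Rabin round with an independently drawn
# base (added note): a composite n survives a round with probability at most
# 1/4, so `prec` rounds bound the false-positive rate by (1/4) ** prec.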
if __name__ == "__main__":
    n = abs(int(input("Enter bound : ").strip()))
print('''Here\'s the list of primes:''')
print(''', '''.join(str(i) for i in range(n + 1) if is_prime_big(i)))
| 71
|
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMInverseScheduler,
DDIMScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
StableDiffusionDiffEditPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import load_image, slow
from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionDiffEditPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """simple docstring"""

    pipeline_class = StableDiffusionDiffEditPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"height", "width", "image"} | {"image_latents"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {"image"} | {"image_latents"}
    image_params = frozenset(
        [])  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])
    def get_dummy_components(self):
        """simple docstring"""
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, attention_head_dim=(2, 4), use_linear_projection=True, )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, )
        inverse_scheduler = DDIMInverseScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_zero=False, )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128, )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act="gelu", projection_dim=512, )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "inverse_scheduler": inverse_scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        """simple docstring"""
        mask = floats_tensor((1, 16, 16), rng=random.Random(seed)).to(device)
        latents = floats_tensor((1, 2, 4, 16, 16), rng=random.Random(seed)).to(device)
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "a dog and a newt",
            "mask_image": mask,
            "image_latents": latents,
            "generator": generator,
            "num_inference_steps": 2,
            "inpaint_strength": 1.0,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs
    def get_dummy_mask_inputs(self, device, seed=0):
        """simple docstring"""
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB")
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": image,
            "source_prompt": "a cat and a frog",
            "target_prompt": "a dog and a newt",
            "generator": generator,
            "num_inference_steps": 2,
            "num_maps_per_mask": 2,
            "mask_encode_strength": 1.0,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs
    def get_dummy_inversion_inputs(self, device, seed=0):
        """simple docstring"""
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB")
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": image,
            "prompt": "a cat and a frog",
            "generator": generator,
            "num_inference_steps": 2,
            "inpaint_strength": 1.0,
            "guidance_scale": 6.0,
            "decode_latents": True,
            "output_type": "numpy",
        }
        return inputs
    def test_save_load_optional_components(self):
        """simple docstring"""
        if not hasattr(self.pipeline_class, "_optional_components"):
            return
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # set all optional components to None and update pipeline config accordingly
        for optional_component in pipe._optional_components:
            setattr(pipe, optional_component, None)
        pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components})
        inputs = self.get_dummy_inputs(torch_device)
        output = pipe(**inputs)[0]
        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir)
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)
            pipe_loaded.to(torch_device)
            pipe_loaded.set_progress_bar_config(disable=None)
            for optional_component in pipe._optional_components:
                self.assertTrue(
                    getattr(pipe_loaded, optional_component) is None, f'`{optional_component}` did not stay set to None after loading.', )
            inputs = self.get_dummy_inputs(torch_device)
            output_loaded = pipe_loaded(**inputs)[0]
            max_diff = np.abs(output - output_loaded).max()
            self.assertLess(max_diff, 1e-4)
    def test_mask(self):
        """simple docstring"""
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_mask_inputs(device)
        mask = pipe.generate_mask(**inputs)
        mask_slice = mask[0, -3:, -3:]
        self.assertEqual(mask.shape, (1, 16, 16))
        expected_slice = np.array([0] * 9)
        max_diff = np.abs(mask_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
        self.assertEqual(mask[0, -3, -4], 0)
    def test_inversion(self):
        """simple docstring"""
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inversion_inputs(device)
        image = pipe.invert(**inputs).images
        image_slice = image[0, -1, -3:, -3:]
        self.assertEqual(image.shape, (2, 32, 32, 3))
        expected_slice = np.array(
            [0.5150, 0.5134, 0.5043, 0.5376, 0.4694, 0.5105, 0.5015, 0.4407, 0.4799], )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
    def test_inference_batch_single_identical(self):
        """simple docstring"""
        super().test_inference_batch_single_identical(expected_max_diff=5e-3)
    def test_inversion_dpm(self):
        """simple docstring"""
        device = "cpu"
        components = self.get_dummy_components()
        scheduler_args = {"beta_start": 0.00085, "beta_end": 0.012, "beta_schedule": "scaled_linear"}
        components["scheduler"] = DPMSolverMultistepScheduler(**scheduler_args)
        components["inverse_scheduler"] = DPMSolverMultistepInverseScheduler(**scheduler_args)
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inversion_inputs(device)
        image = pipe.invert(**inputs).images
        image_slice = image[0, -1, -3:, -3:]
        self.assertEqual(image.shape, (2, 32, 32, 3))
        expected_slice = np.array(
            [0.5150, 0.5134, 0.5043, 0.5376, 0.4694, 0.5105, 0.5015, 0.4407, 0.4799], )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
@require_torch_gpu
@slow
class StableDiffusionDiffEditPipelineIntegrationTests(unittest.TestCase):
    """simple docstring"""

    def tearDown(self):
        """simple docstring"""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @classmethod
    def setUpClass(cls):
        """simple docstring"""
        raw_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png")
        raw_image = raw_image.convert("RGB").resize((768, 768))
        cls.raw_image = raw_image
    def test_stable_diffusion_diffedit_full(self):
        """simple docstring"""
        generator = torch.manual_seed(0)
        pipe = StableDiffusionDiffEditPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2-1", safety_checker=None, torch_dtype=torch.float16)
        pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
        pipe.inverse_scheduler = DDIMInverseScheduler.from_config(pipe.scheduler.config)
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None)
        source_prompt = "a bowl of fruit"
        target_prompt = "a bowl of pears"
        mask_image = pipe.generate_mask(
            image=self.raw_image, source_prompt=source_prompt, target_prompt=target_prompt, generator=generator, )
        inv_latents = pipe.invert(
            prompt=source_prompt, image=self.raw_image, inpaint_strength=0.7, generator=generator).latents
        image = pipe(
            prompt=target_prompt, mask_image=mask_image, image_latents=inv_latents, generator=generator, negative_prompt=source_prompt, inpaint_strength=0.7, output_type="numpy", ).images[0]
        expected_image = (
np.array(
load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/diffedit/pears.png' ).resize((768, 768) ) )
/ 255
)
assert np.abs((expected_image - image).max() ) < 5E-1
    def test_stable_diffusion_diffedit_dpm(self):
        """simple docstring"""
        generator = torch.manual_seed(0)
        pipe = StableDiffusionDiffEditPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2-1", safety_checker=None, torch_dtype=torch.float16)
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.inverse_scheduler = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config)
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None)
        source_prompt = "a bowl of fruit"
        target_prompt = "a bowl of pears"
        mask_image = pipe.generate_mask(
            image=self.raw_image, source_prompt=source_prompt, target_prompt=target_prompt, generator=generator, )
        inv_latents = pipe.invert(
            prompt=source_prompt, image=self.raw_image, inpaint_strength=0.7, generator=generator, num_inference_steps=25, ).latents
        image = pipe(
            prompt=target_prompt, mask_image=mask_image, image_latents=inv_latents, generator=generator, negative_prompt=source_prompt, inpaint_strength=0.7, num_inference_steps=25, output_type="numpy", ).images[0]
__UpperCamelCase : Tuple =(
np.array(
load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/diffedit/pears.png' ).resize((768, 768) ) )
/ 255
)
assert np.abs((expected_image - image).max() ) < 5E-1
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"configuration_tapas": ["TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP", "TapasConfig"],
"tokenization_tapas": ["TapasTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tapas"] = [
"TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST",
"TapasForMaskedLM",
"TapasForQuestionAnswering",
"TapasForSequenceClassification",
"TapasModel",
"TapasPreTrainedModel",
"load_tf_weights_in_tapas",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_tapas"] = [
"TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFTapasForMaskedLM",
"TFTapasForQuestionAnswering",
"TFTapasForSequenceClassification",
"TFTapasModel",
"TFTapasPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig
from .tokenization_tapas import TapasTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tapas import (
TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasPreTrainedModel,
load_tf_weights_in_tapas,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_tapas import (
TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTapasForMaskedLM,
TFTapasForQuestionAnswering,
TFTapasForSequenceClassification,
TFTapasModel,
TFTapasPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
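
# Downstream usage sketch (illustrative; assumes _LazyModule defers each submodule
# import until the attribute is first accessed):
#
#   from transformers.models.tapas import TapasConfig  # importable even without torch
#   config = TapasConfig()                             # configuration_tapas loads lazily here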
"""simple docstring"""
import argparse
import torch
from transformers import YosoConfig, YosoForMaskedLM
def rename_key(orig_key):
    """simple docstring"""
    if "model" in orig_key:
        orig_key = orig_key.replace("model.", "")
    if "norm1" in orig_key:
        orig_key = orig_key.replace("norm1", "attention.output.LayerNorm")
    if "norm2" in orig_key:
        orig_key = orig_key.replace("norm2", "output.LayerNorm")
    if "norm" in orig_key:
        orig_key = orig_key.replace("norm", "LayerNorm")
    if "transformer" in orig_key:
        layer_num = orig_key.split(".")[0].split("_")[-1]
        orig_key = orig_key.replace(f"transformer_{layer_num}", f"encoder.layer.{layer_num}")
    if "mha.attn" in orig_key:
        orig_key = orig_key.replace("mha.attn", "attention.self")
    if "mha" in orig_key:
        orig_key = orig_key.replace("mha", "attention")
    if "W_q" in orig_key:
        orig_key = orig_key.replace("W_q", "self.query")
    if "W_k" in orig_key:
        orig_key = orig_key.replace("W_k", "self.key")
    if "W_v" in orig_key:
        orig_key = orig_key.replace("W_v", "self.value")
    if "ff1" in orig_key:
        orig_key = orig_key.replace("ff1", "intermediate.dense")
    if "ff2" in orig_key:
        orig_key = orig_key.replace("ff2", "output.dense")
    if "ff" in orig_key:
        orig_key = orig_key.replace("ff", "output.dense")
    if "mlm_class" in orig_key:
        orig_key = orig_key.replace("mlm.mlm_class", "cls.predictions.decoder")
    if "mlm" in orig_key:
        orig_key = orig_key.replace("mlm", "cls.predictions.transform")
    if "cls" not in orig_key:
        orig_key = "yoso." + orig_key
    return orig_key
def convert_checkpoint_helper(max_position_embeddings, orig_state_dict):
    """simple docstring"""
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if ("pooler" in key) or ("sen_class" in key):
            continue
        else:
            orig_state_dict[rename_key(key)] = val
    orig_state_dict["cls.predictions.bias"] = orig_state_dict["cls.predictions.decoder.bias"]
    orig_state_dict["yoso.embeddings.position_ids"] = torch.arange(max_position_embeddings).expand((1, -1)) + 2
    return orig_state_dict
def convert_yoso_checkpoint(checkpoint_path, yoso_config_file, pytorch_dump_path):
    """simple docstring"""
    orig_state_dict = torch.load(checkpoint_path, map_location="cpu")["model_state_dict"]
    config = YosoConfig.from_json_file(yoso_config_file)
    model = YosoForMaskedLM(config)
    new_state_dict = convert_checkpoint_helper(config.max_position_embeddings, orig_state_dict)
    print(model.load_state_dict(new_state_dict))
    model.eval()
    model.save_pretrained(pytorch_dump_path)
    print(f"Checkpoint successfully converted. Model saved at {pytorch_dump_path}")
if __name__ == "__main__":
_lowercase : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--pytorch_model_path", default=None, type=str, required=True, help="Path to YOSO pytorch checkpoint."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The json file for YOSO model config.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
_lowercase : Optional[Any] = parser.parse_args()
convert_yoso_checkpoint(args.pytorch_model_path, args.config_file, args.pytorch_dump_path)
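
# Example invocation (the paths below are placeholders, not real files):
#
#   python convert_yoso_checkpoint.py \
#       --pytorch_model_path ./yoso_checkpoint.ckpt \
#       --config_file ./yoso_config.json \
#       --pytorch_dump_path ./yoso-hf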
from typing import Optional, Union
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin
class StableUnCLIPImageNormalizer(ModelMixin, ConfigMixin):
    '''simple docstring'''

    @register_to_config
    def __init__(self, embedding_dim: int = 768):
        '''simple docstring'''
        super().__init__()
        self.mean = nn.Parameter(torch.zeros(1, embedding_dim))
        self.std = nn.Parameter(torch.ones(1, embedding_dim))

    def to(self, torch_device: Optional[Union[str, torch.device]] = None, torch_dtype: Optional[torch.dtype] = None):
        '''simple docstring'''
        self.mean = nn.Parameter(self.mean.to(torch_device).to(torch_dtype))
        self.std = nn.Parameter(self.std.to(torch_device).to(torch_dtype))
        return self

    def scale(self, embeds):
        '''simple docstring'''
        embeds = (embeds - self.mean) * 1.0 / self.std
        return embeds

    def unscale(self, embeds):
        '''simple docstring'''
        embeds = (embeds * self.std) + self.mean
        return embeds
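
# Illustrative round-trip check for the normalizer above (a minimal sketch; with the
# default zero mean and unit std, scale and unscale are inverses up to float precision):
if __name__ == "__main__":
    normalizer = StableUnCLIPImageNormalizer(embedding_dim=4)
    embeds = torch.randn(2, 4)
    assert torch.allclose(normalizer.unscale(normalizer.scale(embeds)), embeds, atol=1e-6)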
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_timesformer''': ['''TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TimesformerConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_timesformer"] = [
'''TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TimesformerModel''',
'''TimesformerForVideoClassification''',
'''TimesformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timesformer import (
TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimesformerForVideoClassification,
TimesformerModel,
TimesformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
import argparse
from collections import defaultdict
def overwrite_file(file, class_name, test_name, correct_line, done_test):
    '''simple docstring'''
    _id = f"{file}_{class_name}_{test_name}"
    done_test[_id] += 1
    with open(file, "r") as f:
        lines = f.readlines()
    class_regex = f"class {class_name}("
    test_regex = f"{4 * ' '}def {test_name}("
    line_begin_regex = f"{8 * ' '}{correct_line.split()[0]}"
    another_line_begin_regex = f"{16 * ' '}{correct_line.split()[0]}"
    in_class = False
    in_func = False
    in_line = False
    insert_line = False
    count = 0
    spaces = 0
    new_lines = []
    for line in lines:
        if line.startswith(class_regex):
            in_class = True
        elif in_class and line.startswith(test_regex):
            in_func = True
        elif in_class and in_func and (line.startswith(line_begin_regex) or line.startswith(another_line_begin_regex)):
            spaces = len(line.split(correct_line.split()[0])[0])
            count += 1
            if count == done_test[_id]:
                in_line = True
        if in_class and in_func and in_line:
            if ")" not in line:
                continue
            else:
                insert_line = True
        if in_class and in_func and in_line and insert_line:
            new_lines.append(f"{spaces * ' '}{correct_line}")
            in_class = in_func = in_line = insert_line = False
        else:
            new_lines.append(line)
    with open(file, "w") as f:
        for line in new_lines:
            f.write(line)
def main(correct, fail=None):
    '''simple docstring'''
    if fail is not None:
        with open(fail, "r") as f:
            test_failures = {l.strip() for l in f.readlines()}
    else:
        test_failures = None
    with open(correct, "r") as f:
        correct_lines = f.readlines()
    done_tests = defaultdict(int)
    for line in correct_lines:
        file, class_name, test_name, correct_line = line.split(";")
        if test_failures is None or "::".join([file, class_name, test_name]) in test_failures:
            overwrite_file(file, class_name, test_name, correct_line, done_tests)
if __name__ == "__main__":
UpperCAmelCase_ = argparse.ArgumentParser()
parser.add_argument('--correct_filename', help='filename of tests with expected result')
parser.add_argument('--fail_filename', help='filename of test failures', type=str, default=None)
UpperCAmelCase_ = parser.parse_args()
main(args.correct_filename, args.fail_filename)
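
# Expected line format in the --correct_filename file (illustrative example):
#
#   tests/models/bert/test_modeling_bert.py;BertModelTest;test_inference;expected_slice = torch.tensor([...])
#
# i.e. four ";"-separated fields: file, test class, test name, and the corrected line.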
'''simple docstring'''
def depth_first_search(grid: list[list[int]], row: int, col: int, visit: set):
    '''simple docstring'''
    row_length, col_length = len(grid), len(grid[0])
    if (
        min(row, col) < 0
        or row == row_length
        or col == col_length
        or (row, col) in visit
        or grid[row][col] == 1
    ):
        return 0
    if row == row_length - 1 and col == col_length - 1:
        return 1
    visit.add((row, col))
    count = 0
    count += depth_first_search(grid, row + 1, col, visit)
    count += depth_first_search(grid, row - 1, col, visit)
    count += depth_first_search(grid, row, col + 1, visit)
    count += depth_first_search(grid, row, col - 1, visit)
    visit.remove((row, col))
    return count
if __name__ == "__main__":
import doctest
doctest.testmod()
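
# Illustrative example (not part of the original module): count the simple paths
# through a 3x3 grid whose center cell is blocked; only the two ring paths remain.
if __name__ == "__main__":
    example_grid = [[0, 0, 0], [0, 1, 0], [0, 0, 0]]
    print(depth_first_search(example_grid, 0, 0, set()))  # expected: 2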
import unittest
from transformers import RoFormerTokenizer, RoFormerTokenizerFast
from transformers.testing_utils import require_rjieba, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_rjieba
@require_tokenizers
class RoFormerTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = RoFormerTokenizer
    rust_tokenizer_class = RoFormerTokenizerFast
    space_between_special_tokens = True
    test_rust_tokenizer = True
    def setUp(self):
        super().setUp()

    def get_tokenizer(self, **kwargs):
        return self.tokenizer_class.from_pretrained("junnyu/roformer_chinese_base", **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return self.rust_tokenizer_class.from_pretrained("junnyu/roformer_chinese_base", **kwargs)

    def get_chinese_input_output_texts(self):
        input_text = "永和服装饰品有限公司,今天天气非常好"
        output_text = "永和 服装 饰品 有限公司 , 今 天 天 气 非常 好"
        return input_text, output_text
    def test_tokenizer(self):
        tokenizer = self.get_tokenizer()
        input_text, output_text = self.get_chinese_input_output_texts()
        tokens = tokenizer.tokenize(input_text)
        self.assertListEqual(tokens, output_text.split())
        input_tokens = tokens + [tokenizer.unk_token]
        exp_tokens = [22943, 21332, 34431, 45904, 117, 306, 1231, 1231, 2653, 33994, 1266, 100]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), exp_tokens)

    def test_rust_tokenizer(self):
        tokenizer = self.get_rust_tokenizer()
        input_text, output_text = self.get_chinese_input_output_texts()
        tokens = tokenizer.tokenize(input_text)
        self.assertListEqual(tokens, output_text.split())
        input_tokens = tokens + [tokenizer.unk_token]
        exp_tokens = [22943, 21332, 34431, 45904, 117, 306, 1231, 1231, 2653, 33994, 1266, 100]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), exp_tokens)
def UpperCamelCase ( self ):
pass
def UpperCamelCase ( self ):
pass
def UpperCamelCase ( self ):
pass
import unittest
from transformers import AutoConfig, AutoTokenizer, BertConfig, TensorType, is_flax_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, slow
if is_flax_available():
import jax
from transformers.models.auto.modeling_flax_auto import FlaxAutoModel
from transformers.models.bert.modeling_flax_bert import FlaxBertModel
from transformers.models.roberta.modeling_flax_roberta import FlaxRobertaModel
@require_flax
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
@slow
def UpperCamelCase ( self ):
for model_name in ["bert-base-cased", "bert-large-uncased"]:
with self.subTest(__lowerCamelCase ):
A__ = AutoConfig.from_pretrained(__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase,__lowerCamelCase )
A__ = FlaxAutoModel.from_pretrained(__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase,__lowerCamelCase )
@slow
def UpperCamelCase ( self ):
for model_name in ["roberta-base", "roberta-large"]:
with self.subTest(__lowerCamelCase ):
A__ = AutoConfig.from_pretrained(__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase,__lowerCamelCase )
A__ = FlaxAutoModel.from_pretrained(__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase,__lowerCamelCase )
@slow
def UpperCamelCase ( self ):
for model_name in ["bert-base-cased", "bert-large-uncased"]:
A__ = AutoTokenizer.from_pretrained(__lowerCamelCase )
A__ = FlaxBertModel.from_pretrained(__lowerCamelCase )
A__ = tokenizer('''Do you support jax jitted function?''',return_tensors=TensorType.JAX )
@jax.jit
def eval(**__lowerCamelCase ):
return model(**__lowerCamelCase )
eval(**__lowerCamelCase ).block_until_ready()
@slow
def UpperCamelCase ( self ):
for model_name in ["roberta-base", "roberta-large"]:
A__ = AutoTokenizer.from_pretrained(__lowerCamelCase )
A__ = FlaxRobertaModel.from_pretrained(__lowerCamelCase )
A__ = tokenizer('''Do you support jax jitted function?''',return_tensors=TensorType.JAX )
@jax.jit
def eval(**__lowerCamelCase ):
return model(**__lowerCamelCase )
eval(**__lowerCamelCase ).block_until_ready()
def UpperCamelCase ( self ):
with self.assertRaisesRegex(
__lowerCamelCase,'''bert-base is not a local folder and is not a valid model identifier''' ):
A__ = FlaxAutoModel.from_pretrained('''bert-base''' )
def UpperCamelCase ( self ):
with self.assertRaisesRegex(
__lowerCamelCase,r'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
A__ = FlaxAutoModel.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision='''aaaaaa''' )
def UpperCamelCase ( self ):
with self.assertRaisesRegex(
__lowerCamelCase,'''hf-internal-testing/config-no-model does not appear to have a file named flax_model.msgpack''',):
A__ = FlaxAutoModel.from_pretrained('''hf-internal-testing/config-no-model''' )
def UpperCamelCase ( self ):
with self.assertRaisesRegex(__lowerCamelCase,'''Use `from_pt=True` to load this model''' ):
A__ = FlaxAutoModel.from_pretrained('''hf-internal-testing/tiny-bert-pt-only''' )
import torch
from torch import nn
class UpperCAmelCase_ ( nn.Module ):
"""simple docstring"""
def __init__( self , _a , _a , _a , _a , _a=1 , _a=False ) -> Optional[int]:
super().__init__()
_a : int = n_token
_a : Optional[int] = d_embed
_a : Optional[Any] = d_proj
_a : List[str] = cutoffs + [n_token]
_a : str = [0] + self.cutoffs
_a : Tuple = div_val
_a : str = self.cutoffs[0]
_a : str = len(self.cutoffs ) - 1
_a : Dict = self.shortlist_size + self.n_clusters
if self.n_clusters > 0:
_a : Optional[Any] = nn.Parameter(torch.zeros(self.n_clusters , self.d_embed ) )
_a : str = nn.Parameter(torch.zeros(self.n_clusters ) )
_a : Union[str, Any] = nn.ModuleList()
_a : str = nn.ParameterList()
if div_val == 1:
for i in range(len(self.cutoffs ) ):
if d_proj != d_embed:
self.out_projs.append(nn.Parameter(torch.FloatTensor(_a , _a ) ) )
else:
self.out_projs.append(_a )
self.out_layers.append(nn.Linear(_a , _a ) )
else:
for i in range(len(self.cutoffs ) ):
_a , _a : Union[str, Any] = self.cutoff_ends[i], self.cutoff_ends[i + 1]
_a : Any = d_embed // (div_val**i)
self.out_projs.append(nn.Parameter(torch.FloatTensor(_a , _a ) ) )
self.out_layers.append(nn.Linear(_a , r_idx - l_idx ) )
_a : List[str] = keep_order
def __lowercase ( self , _a , _a , _a , _a ) -> str:
if proj is None:
_a : Optional[int] = nn.functional.linear(_a , _a , bias=_a )
else:
# if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
_a : Union[str, Any] = nn.functional.linear(_a , proj.t().contiguous() )
_a : Optional[int] = nn.functional.linear(_a , _a , bias=_a )
# else:
# logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
# if bias is not None:
# logit = logit + bias
return logit
def __lowercase ( self , _a , _a=None , _a=False ) -> Optional[Any]:
if labels is not None:
# Shift so that tokens < n predict n
_a : List[Any] = hidden[..., :-1, :].contiguous()
_a : Union[str, Any] = labels[..., 1:].contiguous()
_a : int = hidden.view(-1 , hidden.size(-1 ) )
_a : int = labels.view(-1 )
if hidden.size(0 ) != labels.size(0 ):
raise RuntimeError('''Input and labels should have the same size in the batch dimension.''' )
else:
_a : str = hidden.view(-1 , hidden.size(-1 ) )
if self.n_clusters == 0:
_a : List[str] = self._compute_logit(_a , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] )
if labels is not None:
_a : Dict = labels != -1_0_0
_a : List[str] = torch.zeros_like(_a , dtype=hidden.dtype , device=hidden.device )
_a : str = (
-nn.functional.log_softmax(_a , dim=-1 )[mask].gather(1 , labels[mask].unsqueeze(1 ) ).squeeze(1 )
)
else:
_a : int = nn.functional.log_softmax(_a , dim=-1 )
else:
# construct weights and biases
_a , _a : List[Any] = [], []
for i in range(len(self.cutoffs ) ):
if self.div_val == 1:
_a , _a : Tuple = self.cutoff_ends[i], self.cutoff_ends[i + 1]
_a : Dict = self.out_layers[0].weight[l_idx:r_idx]
_a : Any = self.out_layers[0].bias[l_idx:r_idx]
else:
_a : str = self.out_layers[i].weight
_a : Any = self.out_layers[i].bias
if i == 0:
_a : Optional[int] = torch.cat([weight_i, self.cluster_weight] , dim=0 )
_a : Any = torch.cat([bias_i, self.cluster_bias] , dim=0 )
weights.append(_a )
biases.append(_a )
_a , _a , _a : Tuple = weights[0], biases[0], self.out_projs[0]
_a : Dict = self._compute_logit(_a , _a , _a , _a )
_a : Dict = nn.functional.log_softmax(_a , dim=1 )
if labels is None:
_a : Any = hidden.new_empty((head_logit.size(0 ), self.n_token) )
else:
_a : Optional[int] = torch.zeros_like(_a , dtype=hidden.dtype , device=hidden.device )
_a : List[str] = 0
_a : Optional[int] = [0] + self.cutoffs
for i in range(len(_a ) - 1 ):
_a , _a : List[str] = cutoff_values[i], cutoff_values[i + 1]
if labels is not None:
_a : Dict = (labels >= l_idx) & (labels < r_idx)
_a : Optional[Any] = mask_i.nonzero().squeeze()
if indices_i.numel() == 0:
continue
_a : Optional[int] = labels.index_select(0 , _a ) - l_idx
_a : List[str] = head_logprob.index_select(0 , _a )
_a : List[str] = hidden.index_select(0 , _a )
else:
_a : Optional[Any] = hidden
if i == 0:
if labels is not None:
_a : List[str] = head_logprob_i.gather(1 , target_i[:, None] ).squeeze(1 )
else:
_a : Optional[Any] = head_logprob[:, : self.cutoffs[0]]
else:
_a , _a , _a : List[Any] = weights[i], biases[i], self.out_projs[i]
_a : Tuple = self._compute_logit(_a , _a , _a , _a )
_a : Any = nn.functional.log_softmax(_a , dim=1 )
_a : Union[str, Any] = self.cutoffs[0] + i - 1 # No probability for the head cluster
if labels is not None:
_a : Any = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
1 , target_i[:, None] ).squeeze(1 )
else:
_a : Optional[int] = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
_a : Any = logprob_i
if labels is not None:
if (hasattr(self , '''keep_order''' ) and self.keep_order) or keep_order:
out.index_copy_(0 , _a , -logprob_i )
else:
out[offset : offset + logprob_i.size(0 )].copy_(-logprob_i )
offset += logprob_i.size(0 )
return out
def __lowercase ( self , _a ) -> int:
if self.n_clusters == 0:
_a : Union[str, Any] = self._compute_logit(_a , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] )
return nn.functional.log_softmax(_a , dim=-1 )
else:
# construct weights and biases
_a , _a : Dict = [], []
for i in range(len(self.cutoffs ) ):
if self.div_val == 1:
_a , _a : Dict = self.cutoff_ends[i], self.cutoff_ends[i + 1]
_a : Any = self.out_layers[0].weight[l_idx:r_idx]
_a : str = self.out_layers[0].bias[l_idx:r_idx]
else:
_a : Optional[Any] = self.out_layers[i].weight
_a : List[Any] = self.out_layers[i].bias
if i == 0:
_a : List[str] = torch.cat([weight_i, self.cluster_weight] , dim=0 )
_a : Any = torch.cat([bias_i, self.cluster_bias] , dim=0 )
weights.append(_a )
biases.append(_a )
_a , _a , _a : Any = weights[0], biases[0], self.out_projs[0]
_a : List[Any] = self._compute_logit(_a , _a , _a , _a )
_a : Dict = hidden.new_empty((head_logit.size(0 ), self.n_token) )
_a : List[Any] = nn.functional.log_softmax(_a , dim=1 )
_a : int = [0] + self.cutoffs
for i in range(len(_a ) - 1 ):
_a , _a : Optional[int] = cutoff_values[i], cutoff_values[i + 1]
if i == 0:
_a : List[str] = head_logprob[:, : self.cutoffs[0]]
else:
_a , _a , _a : Tuple = weights[i], biases[i], self.out_projs[i]
_a : Optional[int] = self._compute_logit(_a , _a , _a , _a )
_a : Union[str, Any] = nn.functional.log_softmax(_a , dim=1 )
_a : Any = head_logprob[:, -i] + tail_logprob_i
_a : Tuple = logprob_i
return out
def _modexpt(base: int, exponent: int, modulo_value: int) -> int:
    """simple docstring"""
    if exponent == 1:
        return base
    if exponent % 2 == 0:
        x = _modexpt(base, exponent // 2, modulo_value) % modulo_value
        return (x * x) % modulo_value
    else:
        return (base * _modexpt(base, exponent - 1, modulo_value)) % modulo_value


def solution(base: int = 1_777, height: int = 1_855, digits: int = 8) -> int:
    """simple docstring"""
    result = base
    for _ in range(1, height):
        result = _modexpt(base, result, 10**digits)
    return result
if __name__ == "__main__":
print(f'''{solution() = }''')
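
# Quick sanity check (illustrative): 3**5 = 243, whose last two digits are 43.
if __name__ == "__main__":
    assert _modexpt(3, 5, 100) == 43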
from queue import PriorityQueue
from typing import Any
import numpy as np
def pass_and_relaxation(
    graph: dict,
    v: str,
    visited_forward: set,
    visited_backward: set,
    cst_fwd: dict,
    cst_bwd: dict,
    queue: PriorityQueue,
    parent: dict,
    shortest_distance: float | int,
) -> float | int:
    for nxt, d in graph[v]:
        if nxt in visited_forward:
            continue
        old_cost_f = cst_fwd.get(nxt, np.inf)
        new_cost_f = cst_fwd[v] + d
        if new_cost_f < old_cost_f:
            queue.put((new_cost_f, nxt))
            cst_fwd[nxt] = new_cost_f
            parent[nxt] = v
        if nxt in visited_backward:
            if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
                shortest_distance = cst_fwd[v] + d + cst_bwd[nxt]
    return shortest_distance


def bidirectional_dij(source: str, destination: str, graph_forward: dict, graph_backward: dict) -> int:
    shortest_path_distance = -1
    visited_forward = set()
    visited_backward = set()
    cst_fwd = {source: 0}
    cst_bwd = {destination: 0}
    parent_forward = {source: None}
    parent_backward = {destination: None}
    queue_forward: PriorityQueue[Any] = PriorityQueue()
    queue_backward: PriorityQueue[Any] = PriorityQueue()
    shortest_distance = np.inf
    queue_forward.put((0, source))
    queue_backward.put((0, destination))
    if source == destination:
        return 0
    while not queue_forward.empty() and not queue_backward.empty():
        _, v_fwd = queue_forward.get()
        visited_forward.add(v_fwd)
        _, v_bwd = queue_backward.get()
        visited_backward.add(v_bwd)
        shortest_distance = pass_and_relaxation(
            graph_forward, v_fwd, visited_forward, visited_backward,
            cst_fwd, cst_bwd, queue_forward, parent_forward, shortest_distance,
        )
        shortest_distance = pass_and_relaxation(
            graph_backward, v_bwd, visited_backward, visited_forward,
            cst_bwd, cst_fwd, queue_backward, parent_backward, shortest_distance,
        )
        if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
            break
    if shortest_distance != np.inf:
        shortest_path_distance = shortest_distance
    return shortest_path_distance
graph_fwd = {
"B": [["C", 1]],
"C": [["D", 1]],
"D": [["F", 1]],
"E": [["B", 1], ["G", 2]],
"F": [],
"G": [["F", 1]],
}
graph_bwd = {
"B": [["E", 1]],
"C": [["B", 1]],
"D": [["C", 1]],
"F": [["D", 1], ["G", 1]],
"E": [[None, np.inf]],
"G": [["E", 2]],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
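
# Illustrative query on the example graphs above: the cheapest E -> F route is
# E -> G -> F with total weight 2 + 1 = 3.
if __name__ == "__main__":
    print(bidirectional_dij("E", "F", graph_fwd, graph_bwd))  # expected: 3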
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class BaseTransformersCLICommand(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError()
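
# A minimal sketch of a concrete command (hypothetical class, for illustration only):
class _EchoCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        sub = parser.add_subparsers().add_parser("echo")
        sub.set_defaults(func=lambda args: _EchoCommand())

    def run(self):
        print("echo")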
'''simple docstring'''
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse('9.1.0'):
    PIL_INTERPOLATION = {
'linear': PIL.Image.Resampling.BILINEAR,
'bilinear': PIL.Image.Resampling.BILINEAR,
'bicubic': PIL.Image.Resampling.BICUBIC,
'lanczos': PIL.Image.Resampling.LANCZOS,
'nearest': PIL.Image.Resampling.NEAREST,
}
else:
    PIL_INTERPOLATION = {
'linear': PIL.Image.LINEAR,
'bilinear': PIL.Image.BILINEAR,
'bicubic': PIL.Image.BICUBIC,
'lanczos': PIL.Image.LANCZOS,
'nearest': PIL.Image.NEAREST,
}
def pt_to_pil(images):
    images = (images / 2 + 0.5).clamp(0, 1)
    images = images.cpu().permute(0, 2, 3, 1).float().numpy()
    images = numpy_to_pil(images)
    return images


def numpy_to_pil(images):
    if images.ndim == 3:
        images = images[None, ...]
    images = (images * 255).round().astype("uint8")
    if images.shape[-1] == 1:
        # special case for grayscale (single channel) images
        pil_images = [Image.fromarray(image.squeeze(), mode="L") for image in images]
    else:
        pil_images = [Image.fromarray(image) for image in images]
    return pil_images
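
# Illustrative usage (assumes a torch tensor in [-1, 1], NCHW layout, as produced
# by diffusion models):
if __name__ == "__main__":
    import torch

    fake_sample = torch.rand(1, 3, 8, 8) * 2 - 1
    print(pt_to_pil(fake_sample)[0].size)  # (8, 8)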
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
snake_case_ : int = logging.get_logger(__name__)
def get_dpt_config(checkpoint_url):
    config = DPTConfig(embedding_type='hybrid')
    if "large" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.backbone_out_indices = [5, 11, 17, 23]
        config.neck_hidden_sizes = [256, 512, 1024, 1024]
        expected_shape = (1, 384, 384)
    if "nyu" in checkpoint_url or "midas" in checkpoint_url:
        config.hidden_size = 768
        config.reassemble_factors = [1, 1, 1, 0.5]
        config.neck_hidden_sizes = [256, 512, 768, 768]
        config.num_labels = 150
        config.patch_size = 16
        expected_shape = (1, 384, 384)
        config.use_batch_norm_in_fusion_residual = False
        config.readout_type = 'project'
    if "ade" in checkpoint_url:
        config.use_batch_norm_in_fusion_residual = True
        config.hidden_size = 768
        config.reassemble_factors = [1, 1, 1, 0.5]
        config.num_labels = 150
        config.patch_size = 16
        repo_id = 'huggingface/label-files'
        filename = 'ade20k-id2label.json'
        id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type='dataset')), 'r'))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        expected_shape = [1, 150, 480, 480]
    return config, expected_shape
def remove_ignore_keys_(state_dict):
    ignore_keys = ['pretrained.model.head.weight', 'pretrained.model.head.bias']
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(name):
    if (
        "pretrained.model" in name
        and "cls_token" not in name
        and "pos_embed" not in name
        and "patch_embed" not in name
    ):
        name = name.replace('pretrained.model', 'dpt.encoder')
    if "pretrained.model" in name:
        name = name.replace('pretrained.model', 'dpt.embeddings')
    if "patch_embed" in name:
        name = name.replace('patch_embed', '')
    if "pos_embed" in name:
        name = name.replace('pos_embed', 'position_embeddings')
    if "attn.proj" in name:
        name = name.replace('attn.proj', 'attention.output.dense')
    if "proj" in name and "project" not in name:
        name = name.replace('proj', 'projection')
    if "blocks" in name:
        name = name.replace('blocks', 'layer')
    if "mlp.fc1" in name:
        name = name.replace('mlp.fc1', 'intermediate.dense')
    if "mlp.fc2" in name:
        name = name.replace('mlp.fc2', 'output.dense')
    if "norm1" in name and "backbone" not in name:
        name = name.replace('norm1', 'layernorm_before')
    if "norm2" in name and "backbone" not in name:
        name = name.replace('norm2', 'layernorm_after')
    if "scratch.output_conv" in name:
        name = name.replace('scratch.output_conv', 'head')
    if "scratch" in name:
        name = name.replace('scratch', 'neck')
    if "layer1_rn" in name:
        name = name.replace('layer1_rn', 'convs.0')
    if "layer2_rn" in name:
        name = name.replace('layer2_rn', 'convs.1')
    if "layer3_rn" in name:
        name = name.replace('layer3_rn', 'convs.2')
    if "layer4_rn" in name:
        name = name.replace('layer4_rn', 'convs.3')
    if "refinenet" in name:
        layer_idx = int(name[len('neck.refinenet') : len('neck.refinenet') + 1])
        # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
        name = name.replace(f'refinenet{layer_idx}', f'fusion_stage.layers.{abs(layer_idx-4)}')
    if "out_conv" in name:
        name = name.replace('out_conv', 'projection')
    if "resConfUnit1" in name:
        name = name.replace('resConfUnit1', 'residual_layer1')
    if "resConfUnit2" in name:
        name = name.replace('resConfUnit2', 'residual_layer2')
    if "conv1" in name:
        name = name.replace('conv1', 'convolution1')
    if "conv2" in name:
        name = name.replace('conv2', 'convolution2')
    # readout blocks
    if "pretrained.act_postprocess1.0.project.0" in name:
        name = name.replace('pretrained.act_postprocess1.0.project.0', 'neck.reassemble_stage.readout_projects.0.0')
    if "pretrained.act_postprocess2.0.project.0" in name:
        name = name.replace('pretrained.act_postprocess2.0.project.0', 'neck.reassemble_stage.readout_projects.1.0')
    if "pretrained.act_postprocess3.0.project.0" in name:
        name = name.replace('pretrained.act_postprocess3.0.project.0', 'neck.reassemble_stage.readout_projects.2.0')
    if "pretrained.act_postprocess4.0.project.0" in name:
        name = name.replace('pretrained.act_postprocess4.0.project.0', 'neck.reassemble_stage.readout_projects.3.0')
    # resize blocks
    if "pretrained.act_postprocess1.3" in name:
        name = name.replace('pretrained.act_postprocess1.3', 'neck.reassemble_stage.layers.0.projection')
    if "pretrained.act_postprocess1.4" in name:
        name = name.replace('pretrained.act_postprocess1.4', 'neck.reassemble_stage.layers.0.resize')
    if "pretrained.act_postprocess2.3" in name:
        name = name.replace('pretrained.act_postprocess2.3', 'neck.reassemble_stage.layers.1.projection')
    if "pretrained.act_postprocess2.4" in name:
        name = name.replace('pretrained.act_postprocess2.4', 'neck.reassemble_stage.layers.1.resize')
    if "pretrained.act_postprocess3.3" in name:
        name = name.replace('pretrained.act_postprocess3.3', 'neck.reassemble_stage.layers.2.projection')
    if "pretrained.act_postprocess4.3" in name:
        name = name.replace('pretrained.act_postprocess4.3', 'neck.reassemble_stage.layers.3.projection')
    if "pretrained.act_postprocess4.4" in name:
        name = name.replace('pretrained.act_postprocess4.4', 'neck.reassemble_stage.layers.3.resize')
    if "pretrained" in name:
        name = name.replace('pretrained', 'dpt')
    if "bn" in name:
        name = name.replace('bn', 'batch_norm')
    if "head" in name:
        name = name.replace('head', 'head.head')
    if "encoder.norm" in name:
        name = name.replace('encoder.norm', 'layernorm')
    if "auxlayer" in name:
        name = name.replace('auxlayer', 'auxiliary_head.head')
    if "backbone" in name:
        name = name.replace('backbone', 'backbone.bit.encoder')
    if ".." in name:
        name = name.replace('..', '.')
    if "stem.conv" in name:
        name = name.replace('stem.conv', 'bit.embedder.convolution')
    if "blocks" in name:
        name = name.replace('blocks', 'layers')
    if "convolution" in name and "backbone" in name:
        name = name.replace('convolution', 'conv')
    if "layer" in name and "backbone" in name:
        name = name.replace('layer', 'layers')
    if "backbone.bit.encoder.bit" in name:
        name = name.replace('backbone.bit.encoder.bit', 'backbone.bit')
    if "embedder.conv" in name:
        name = name.replace('embedder.conv', 'embedder.convolution')
    if "backbone.bit.encoder.stem.norm" in name:
        name = name.replace('backbone.bit.encoder.stem.norm', 'backbone.bit.embedder.norm')
    return name
def read_in_q_k_v(state_dict, config):
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f'dpt.encoder.layer.{i}.attn.qkv.weight')
        in_proj_bias = state_dict.pop(f'dpt.encoder.layer.{i}.attn.qkv.bias')
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f'dpt.encoder.layer.{i}.attention.attention.query.weight'] = in_proj_weight[: config.hidden_size, :]
        state_dict[f'dpt.encoder.layer.{i}.attention.attention.query.bias'] = in_proj_bias[: config.hidden_size]
        state_dict[f'dpt.encoder.layer.{i}.attention.attention.key.weight'] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f'dpt.encoder.layer.{i}.attention.attention.key.bias'] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f'dpt.encoder.layer.{i}.attention.attention.value.weight'] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f'dpt.encoder.layer.{i}.attention.attention.value.bias'] = in_proj_bias[-config.hidden_size :]
def prepare_img():
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_dpt_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub, model_name, show_prediction):
    config, expected_shape = get_dpt_config(checkpoint_url)
    # load original state_dict from URL
    # state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    state_dict = torch.load(checkpoint_url, map_location='cpu')
    # remove certain keys
    remove_ignore_keys_(state_dict)
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val
    # read in qkv matrices
    read_in_q_k_v(state_dict, config)
    # load HuggingFace model
    model = DPTForSemanticSegmentation(config) if 'ade' in checkpoint_url else DPTForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()
    # Check outputs on an image
    size = 480 if 'ade' in checkpoint_url else 384
    image_processor = DPTImageProcessor(size=size)
    image = prepare_img()
    encoding = image_processor(image, return_tensors='pt')
    # forward pass
    outputs = model(**encoding).logits if 'ade' in checkpoint_url else model(**encoding).predicted_depth
    if show_prediction:
        prediction = (
            torch.nn.functional.interpolate(
                outputs.unsqueeze(1), size=(image.size[1], image.size[0]), mode='bicubic', align_corners=False,
            )
            .squeeze()
            .cpu()
            .numpy()
        )
        Image.fromarray((prediction / prediction.max()) * 255).show()
    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f'Saving model to {pytorch_dump_folder_path}')
        model.save_pretrained(pytorch_dump_folder_path)
        print(f'Saving image processor to {pytorch_dump_folder_path}')
        image_processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        model.push_to_hub('ybelkada/dpt-hybrid-midas')
        image_processor.push_to_hub('ybelkada/dpt-hybrid-midas')
if __name__ == "__main__":
snake_case_ : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt',
type=str,
help='URL of the original DPT checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=False,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
)
parser.add_argument(
'--model_name',
default='dpt-large',
type=str,
help='Name of the model, in case you\'re pushing to the hub.',
)
parser.add_argument(
'--show_prediction',
action='store_true',
)
snake_case_ : Tuple = parser.parse_args()
convert_dpt_checkpoint(
args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name, args.show_prediction
)
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {"configuration_unispeech": ["UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP", "UniSpeechConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_unispeech"] = [
"""UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""UniSpeechForCTC""",
"""UniSpeechForPreTraining""",
"""UniSpeechForSequenceClassification""",
"""UniSpeechModel""",
"""UniSpeechPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_unispeech import (
UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
UniSpeechForCTC,
UniSpeechForPreTraining,
UniSpeechForSequenceClassification,
UniSpeechModel,
UniSpeechPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
UpperCamelCase = logging.get_logger(__name__)
if is_vision_available():
import PIL
class _lowerCamelCase ( BaseImageProcessor ):
    """simple docstring"""

    model_input_names = ["pixel_values"]
def __init__( self , _SCREAMING_SNAKE_CASE = True , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = PILImageResampling.BICUBIC , _SCREAMING_SNAKE_CASE = True , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = True , _SCREAMING_SNAKE_CASE = 1 / 255 , _SCREAMING_SNAKE_CASE = True , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = True , **_SCREAMING_SNAKE_CASE , )->None:
'''simple docstring'''
super().__init__(**_SCREAMING_SNAKE_CASE )
A_ : Tuple = size if size is not None else {'''shortest_edge''': 224}
A_ : Any = get_size_dict(_SCREAMING_SNAKE_CASE , default_to_square=_SCREAMING_SNAKE_CASE )
A_ : Tuple = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224}
A_ : Dict = get_size_dict(_SCREAMING_SNAKE_CASE , default_to_square=_SCREAMING_SNAKE_CASE , param_name='''crop_size''' )
A_ : str = do_resize
A_ : Tuple = size
A_ : Optional[Any] = resample
A_ : Tuple = do_center_crop
A_ : List[Any] = crop_size
A_ : Optional[int] = do_rescale
A_ : Tuple = rescale_factor
A_ : Any = do_normalize
A_ : int = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
A_ : Any = image_std if image_std is not None else OPENAI_CLIP_STD
A_ : Any = do_convert_rgb
def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = PILImageResampling.BICUBIC , _SCREAMING_SNAKE_CASE = None , **_SCREAMING_SNAKE_CASE , )->np.ndarray:
'''simple docstring'''
A_ : Dict = get_size_dict(_SCREAMING_SNAKE_CASE , default_to_square=_SCREAMING_SNAKE_CASE )
if "shortest_edge" not in size:
raise ValueError(F'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''' )
A_ : Any = get_resize_output_image_size(_SCREAMING_SNAKE_CASE , size=size['''shortest_edge'''] , default_to_square=_SCREAMING_SNAKE_CASE )
return resize(_SCREAMING_SNAKE_CASE , size=_SCREAMING_SNAKE_CASE , resample=_SCREAMING_SNAKE_CASE , data_format=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , **_SCREAMING_SNAKE_CASE , )->np.ndarray:
'''simple docstring'''
A_ : str = get_size_dict(_SCREAMING_SNAKE_CASE )
if "height" not in size or "width" not in size:
raise ValueError(F'''The `size` parameter must contain the keys (height, width). Got {size.keys()}''' )
return center_crop(_SCREAMING_SNAKE_CASE , size=(size['''height'''], size['''width''']) , data_format=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , **_SCREAMING_SNAKE_CASE , )->int:
'''simple docstring'''
return rescale(_SCREAMING_SNAKE_CASE , scale=_SCREAMING_SNAKE_CASE , data_format=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , **_SCREAMING_SNAKE_CASE , )->np.ndarray:
'''simple docstring'''
return normalize(_SCREAMING_SNAKE_CASE , mean=_SCREAMING_SNAKE_CASE , std=_SCREAMING_SNAKE_CASE , data_format=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = ChannelDimension.FIRST , **_SCREAMING_SNAKE_CASE , )->PIL.Image.Image:
'''simple docstring'''
A_ : Optional[int] = do_resize if do_resize is not None else self.do_resize
A_ : int = size if size is not None else self.size
A_ : Optional[int] = get_size_dict(_SCREAMING_SNAKE_CASE , param_name='''size''' , default_to_square=_SCREAMING_SNAKE_CASE )
A_ : List[Any] = resample if resample is not None else self.resample
A_ : Any = do_center_crop if do_center_crop is not None else self.do_center_crop
A_ : List[str] = crop_size if crop_size is not None else self.crop_size
A_ : int = get_size_dict(_SCREAMING_SNAKE_CASE , param_name='''crop_size''' , default_to_square=_SCREAMING_SNAKE_CASE )
A_ : Optional[int] = do_rescale if do_rescale is not None else self.do_rescale
A_ : int = rescale_factor if rescale_factor is not None else self.rescale_factor
A_ : int = do_normalize if do_normalize is not None else self.do_normalize
A_ : Tuple = image_mean if image_mean is not None else self.image_mean
A_ : Tuple = image_std if image_std is not None else self.image_std
A_ : List[str] = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
A_ : Optional[int] = make_list_of_images(_SCREAMING_SNAKE_CASE )
if not valid_images(_SCREAMING_SNAKE_CASE ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
A_ : List[str] = [convert_to_rgb(_SCREAMING_SNAKE_CASE ) for image in images]
# All transformations expect numpy arrays.
A_ : int = [to_numpy_array(_SCREAMING_SNAKE_CASE ) for image in images]
if do_resize:
A_ : Tuple = [self.resize(image=_SCREAMING_SNAKE_CASE , size=_SCREAMING_SNAKE_CASE , resample=_SCREAMING_SNAKE_CASE ) for image in images]
if do_center_crop:
A_ : Union[str, Any] = [self.center_crop(image=_SCREAMING_SNAKE_CASE , size=_SCREAMING_SNAKE_CASE ) for image in images]
if do_rescale:
A_ : Tuple = [self.rescale(image=_SCREAMING_SNAKE_CASE , scale=_SCREAMING_SNAKE_CASE ) for image in images]
if do_normalize:
A_ : List[Any] = [self.normalize(image=_SCREAMING_SNAKE_CASE , mean=_SCREAMING_SNAKE_CASE , std=_SCREAMING_SNAKE_CASE ) for image in images]
A_ : str = [to_channel_dimension_format(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for image in images]
A_ : Optional[Any] = {'''pixel_values''': images}
return BatchFeature(data=_SCREAMING_SNAKE_CASE , tensor_type=_SCREAMING_SNAKE_CASE )
"""simple docstring"""
def decimal_to_fraction(decimal) -> tuple[int, int]:
    try:
        decimal = float(decimal)
    except ValueError:
        raise ValueError("Please enter a valid number")
    fractional_part = decimal - int(decimal)
    if fractional_part == 0:
        return int(decimal), 1
    else:
        number_of_frac_digits = len(str(decimal).split(".")[1])
        numerator = int(decimal * (10**number_of_frac_digits))
        denominator = 10**number_of_frac_digits
        divisor, dividend = denominator, numerator
        while True:
            remainder = dividend % divisor
            if remainder == 0:
                break
            dividend, divisor = divisor, remainder
        numerator, denominator = numerator / divisor, denominator / divisor
        return int(numerator), int(denominator)
if __name__ == "__main__":
print(f'{decimal_to_fraction(2) = }')
print(f'{decimal_to_fraction(89.0) = }')
print(f'{decimal_to_fraction("67") = }')
print(f'{decimal_to_fraction("45.0") = }')
print(f'{decimal_to_fraction(1.5) = }')
print(f'{decimal_to_fraction("6.25") = }')
print(f'{decimal_to_fraction("78td") = }')
"""simple docstring"""
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import platform
import numpy as np
import psutil
import torch
from accelerate import __version__ as version
from accelerate.commands.config import default_config_file, load_config_from_file
from ..utils import is_npu_available, is_xpu_available
def env_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("env")
    else:
        parser = argparse.ArgumentParser("Accelerate env command")
    parser.add_argument(
        "--config_file", default=None, help="The config file to use for the default values in the launching script.")
    if subparsers is not None:
        parser.set_defaults(func=env_command)
    return parser


def env_command(args):
    pt_version = torch.__version__
    pt_cuda_available = torch.cuda.is_available()
    pt_xpu_available = is_xpu_available()
    pt_npu_available = is_npu_available()
    accelerate_config = "Not found"
    # Get the default from the config file.
    if args.config_file is not None or os.path.isfile(default_config_file):
        accelerate_config = load_config_from_file(args.config_file).to_dict()
    info = {
        "`Accelerate` version": version,
        "Platform": platform.platform(),
        "Python version": platform.python_version(),
        "Numpy version": np.__version__,
        "PyTorch version (GPU?)": f"""{pt_version} ({pt_cuda_available})""",
        "PyTorch XPU available": str(pt_xpu_available),
        "PyTorch NPU available": str(pt_npu_available),
        "System RAM": f"""{psutil.virtual_memory().total / 10_24 ** 3:.2f} GB""",
    }
    if pt_cuda_available:
        info["GPU type"] = torch.cuda.get_device_name()
    print("\nCopy-and-paste the text below in your GitHub issue\n")
    print("\n".join([f"""- {prop}: {val}""" for prop, val in info.items()]))
    print("- `Accelerate` default config:" if args.config_file is None else "- `Accelerate` config passed:")
    accelerate_config_str = (
        "\n".join([f"""\t- {prop}: {val}""" for prop, val in accelerate_config.items()])
        if isinstance(accelerate_config, dict)
        else f"""\t{accelerate_config}"""
    )
    print(accelerate_config_str)
    info["`Accelerate` configs"] = accelerate_config
    return info


def main() -> int:
    parser = env_command_parser()
    args = parser.parse_args()
    env_command(args)
    return 0
if __name__ == "__main__":
raise SystemExit(main())
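
# Programmatic usage sketch (illustrative; bypasses the `accelerate env` CLI entry):
#
#   args = env_command_parser().parse_args([])
#   info = env_command(args)  # prints and returns the environment table above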
"""simple docstring"""
def solution(length: int = 50) -> int:
    ways_number = [1] * (length + 1)
    for row_length in range(length + 1):
        for tile_length in range(2, 5):
            for tile_start in range(row_length - tile_length + 1):
                ways_number[row_length] += ways_number[
                    row_length - tile_start - tile_length
                ]
    return ways_number[length]
if __name__ == "__main__":
print(F"""{solution() = }""")
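
# Illustrative boundary check: rows of length 0 or 1 admit exactly one arrangement
# (no tile placed), since no tile of length 2-4 fits.
if __name__ == "__main__":
    assert solution(0) == 1 and solution(1) == 1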
"""simple docstring"""
def gcd(a: int, b: int) -> int:
    while a != 0:
        a, b = b % a, a
    return b


def mod_inverse(a: int, m: int) -> int:
    if gcd(a, m) != 1:
        msg = f"mod inverse of {a!r} and {m!r} does not exist"
        raise ValueError(msg)
    u1, u2, u3 = 1, 0, a
    v1, v2, v3 = 0, 1, m
    while v3 != 0:
        q = u3 // v3
        v1, v2, v3, u1, u2, u3 = (u1 - q * v1), (u2 - q * v2), (u3 - q * v3), v1, v2, v3
    return u1 % m
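
# Illustrative example: 3 * 4 = 12 ≡ 1 (mod 11), so the inverse of 3 modulo 11 is 4.
if __name__ == "__main__":
    assert mod_inverse(3, 11) == 4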
from typing import TYPE_CHECKING
from ..utils import _LazyModule
_import_structure = {
'''config''': [
'''EXTERNAL_DATA_FORMAT_SIZE_LIMIT''',
'''OnnxConfig''',
'''OnnxConfigWithPast''',
'''OnnxSeq2SeqConfigWithPast''',
'''PatchingSpec''',
],
'''convert''': ['''export''', '''validate_model_outputs'''],
'''features''': ['''FeaturesManager'''],
'''utils''': ['''ParameterFormat''', '''compute_serialized_parameters_size'''],
}
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
OnnxSeqaSeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
__lowercase = logging.get_logger(__name__)
__lowercase = {
'''EleutherAI/gpt-j-6B''': '''https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json''',
# See all GPT-J models at https://huggingface.co/models?filter=gpt_j
}
class GPTJConfig(PretrainedConfig):
    '''simple docstring'''
    model_type = "gptj"
    attribute_map = {
"""max_position_embeddings""": """n_positions""",
"""hidden_size""": """n_embd""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
    def __init__(self, vocab_size=50_400, n_positions=2_048, n_embd=4_096, n_layer=28, n_head=16, rotary_dim=64,
                 n_inner=None, activation_function="gelu_new", resid_pdrop=0.0, embd_pdrop=0.0, attn_pdrop=0.0,
                 layer_norm_epsilon=1e-5, initializer_range=0.02, use_cache=True, bos_token_id=50_256,
                 eos_token_id=50_256, tie_word_embeddings=False, **kwargs):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs)
class GPTJOnnxConfig(OnnxConfigWithPast):
    def __init__(self, config: PretrainedConfig, task: str = "default", patching_specs: List[PatchingSpec] = None, use_past: bool = False):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}
        return common_inputs

    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head

    def generate_dummy_inputs(self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the inputs in the way they appear in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
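# A minimal, hypothetical usage sketch built on the two classes above (the task
# name and the attribute reads are the only assumptions, both standard for
# `OnnxConfigWithPast` subclasses):
#
#   onnx_config = GPTJOnnxConfig(GPTJConfig(), task="default")
#   print(onnx_config.inputs, onnx_config.default_onnx_opset)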
| 105
| 1
|
# flake8: noqa
# Lint as: python3
from typing import Dict, List, Optional, Type
from .. import config
from ..utils import logging
from .formatting import (
ArrowFormatter,
CustomFormatter,
Formatter,
PandasFormatter,
PythonFormatter,
TensorFormatter,
format_table,
query_table,
)
from .np_formatter import NumpyFormatter
logger = logging.get_logger(__name__)

_FORMAT_TYPES: Dict[Optional[str], Type[Formatter]] = {}
_FORMAT_TYPES_ALIASES: Dict[Optional[str], str] = {}
_FORMAT_TYPES_ALIASES_UNAVAILABLE: Dict[Optional[str], Exception] = {}
def _register_formatter(
    formatter_cls: type,
    format_type: Optional[str],
    aliases: Optional[List[str]] = None,
):
    """Register a Formatter class under a name and optional aliases."""
    aliases = aliases if aliases is not None else []
    if format_type in _FORMAT_TYPES:
        logger.warning(
            f"Overwriting format type '{format_type}' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})"
        )
    _FORMAT_TYPES[format_type] = formatter_cls
    for alias in set(aliases + [format_type]):
        if alias in _FORMAT_TYPES_ALIASES:
            logger.warning(
                f"Overwriting format type alias '{alias}' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})"
            )
        _FORMAT_TYPES_ALIASES[alias] = format_type


def _register_unavailable_formatter(
    unavailable_error: Exception, format_type: Optional[str], aliases: Optional[List[str]] = None
):
    """Register an error to raise when an unavailable format type (or one of its aliases) is requested."""
    aliases = aliases if aliases is not None else []
    for alias in set(aliases + [format_type]):
        _FORMAT_TYPES_ALIASES_UNAVAILABLE[alias] = unavailable_error
# Here we define all the available formatting functions that can be used by `Dataset.set_format`
_register_formatter(PythonFormatter, None, aliases=['python'])
_register_formatter(ArrowFormatter, 'arrow', aliases=['pa', 'pyarrow'])
_register_formatter(NumpyFormatter, 'numpy', aliases=['np'])
_register_formatter(PandasFormatter, 'pandas', aliases=['pd'])
_register_formatter(CustomFormatter, 'custom')
if config.TORCH_AVAILABLE:
from .torch_formatter import TorchFormatter
_register_formatter(TorchFormatter, 'torch', aliases=['pt', 'pytorch'])
else:
    _torch_error = ValueError('PyTorch needs to be installed to be able to return PyTorch tensors.')
_register_unavailable_formatter(_torch_error, 'torch', aliases=['pt', 'pytorch'])
if config.TF_AVAILABLE:
from .tf_formatter import TFFormatter
_register_formatter(TFFormatter, 'tensorflow', aliases=['tf'])
else:
    _tf_error = ValueError('Tensorflow needs to be installed to be able to return Tensorflow tensors.')
_register_unavailable_formatter(_tf_error, 'tensorflow', aliases=['tf'])
if config.JAX_AVAILABLE:
from .jax_formatter import JaxFormatter
_register_formatter(JaxFormatter, 'jax', aliases=[])
else:
    _jax_error = ValueError('JAX needs to be installed to be able to return JAX arrays.')
_register_unavailable_formatter(_jax_error, 'jax', aliases=[])
def get_format_type_from_alias(format_type: Optional[str]) -> Optional[str]:
    """Resolve an alias to its main format type name; return the input unchanged if it is not an alias."""
    if format_type in _FORMAT_TYPES_ALIASES:
        return _FORMAT_TYPES_ALIASES[format_type]
    else:
        return format_type
def get_formatter(format_type: Optional[str], **format_kwargs) -> Formatter:
    """Factory that instantiates a Formatter from its type name (or alias) and keyword arguments."""
    format_type = get_format_type_from_alias(format_type)
    if format_type in _FORMAT_TYPES:
        return _FORMAT_TYPES[format_type](**format_kwargs)
    if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE:
        raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type]
    else:
        raise ValueError(
            f"Return type should be None or selected in {list(type for type in _FORMAT_TYPES.keys() if type is not None)}, but got '{format_type}'"
        )
| 312
|
from typing import Dict
from transformers import EvalPrediction, HfArgumentParser, TrainingArguments, is_torch_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
get_torch_dist_unique_port,
require_torch_multi_gpu,
require_torch_neuroncore,
)
from transformers.training_args import ParallelMode
from transformers.utils import logging
logger = logging.get_logger(__name__)
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
from transformers import Trainer
class DummyDataset(Dataset):
    def __init__(self, length: int = 101):
        self.length = length

    def __len__(self):
        return self.length

    def __getitem__(self, i):
        return i


class DummyDataCollator:
    def __call__(self, features):
        return {"input_ids": torch.tensor(features), "labels": torch.tensor(features)}
class DummyModel(nn.Module):
    def __init__(self):
        super().__init__()
        # Add some (unused) params otherwise DDP will complain.
        self.fc = nn.Linear(120, 80)

    def forward(self, input_ids, labels=None):
        if labels is not None:
            return torch.tensor(0.0, device=input_ids.device), input_ids
        else:
            return input_ids
class TestTrainerDistributedNeuronCore(TestCasePlus):
    @require_torch_neuroncore
    def test_trainer(self):
        distributed_args = f"""--nproc_per_node=2
            --master_port={get_torch_dist_unique_port()}
            {self.test_file_dir}/test_trainer_distributed.py
        """.split()
        output_dir = self.get_auto_remove_tmp_dir()
        args = f"--output_dir {output_dir}".split()
        cmd = ["torchrun"] + distributed_args + args
        execute_subprocess_async(cmd, env=self.get_env())
        # successful return here == success - any errors would have caused an error in the sub-call


class TestTrainerDistributed(TestCasePlus):
    @require_torch_multi_gpu
    def test_trainer(self):
        distributed_args = f"""--nproc_per_node={torch.cuda.device_count()}
            --master_port={get_torch_dist_unique_port()}
            {self.test_file_dir}/test_trainer_distributed.py
        """.split()
        output_dir = self.get_auto_remove_tmp_dir()
        args = f"--output_dir {output_dir}".split()
        cmd = ["torchrun"] + distributed_args + args
        execute_subprocess_async(cmd, env=self.get_env())
        # successful return here == success - any errors would have caused an error in the sub-call
if __name__ == "__main__":
# The script below is meant to be run under torch.distributed, on a machine with multiple GPUs:
#
# PYTHONPATH="src" python -m torch.distributed.run --nproc_per_node 2 --output_dir output_dir ./tests/test_trainer_distributed.py
    parser = HfArgumentParser((TrainingArguments,))
    training_args = parser.parse_args_into_dataclasses()[0]

    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        f"distributed training: {training_args.parallel_mode != ParallelMode.NOT_DISTRIBUTED}"
    )

    # Essentially, what we want to verify in the distributed case is that we get all samples back,
    # in the right order. (this is crucial for prediction for instance)
    for dataset_length in [101, 40, 7]:
        dataset = DummyDataset(dataset_length)

        def compute_metrics(p: EvalPrediction) -> Dict:
            sequential = list(range(len(dataset)))
            success = p.predictions.tolist() == sequential and p.label_ids.tolist() == sequential
            if not success and training_args.local_rank == 0:
                logger.warning(
                    "Predictions and/or labels do not match expected results:\n  - predictions: "
                    f"{p.predictions.tolist()}\n  - labels: {p.label_ids.tolist()}\n  - expected: {sequential}"
                )
            return {"success": success}

        trainer = Trainer(
            model=DummyModel(),
            args=training_args,
            data_collator=DummyDataCollator(),
            eval_dataset=dataset,
            compute_metrics=compute_metrics,
        )
        metrics = trainer.evaluate()
        logger.info(metrics)
        if metrics["eval_success"] is not True:
            logger.error(metrics)
            exit(1)

        p = trainer.predict(dataset)
        logger.info(p.metrics)
        if p.metrics["test_success"] is not True:
            logger.error(p.metrics)
            exit(1)

        trainer.args.eval_accumulation_steps = 2

        metrics = trainer.evaluate()
        logger.info(metrics)
        if metrics["eval_success"] is not True:
            logger.error(metrics)
            exit(1)

        p = trainer.predict(dataset)
        logger.info(p.metrics)
        if p.metrics["test_success"] is not True:
            logger.error(p.metrics)
            exit(1)

        trainer.args.eval_accumulation_steps = None
| 312
| 1
|
def get_set_bits_count(number: int) -> int:
    """Count the number of set bits (1s) in the binary representation of `number`."""
    if not isinstance(number, int):
        raise TypeError("Input value must be an 'int' type")
    if number < 0:
        raise ValueError("Input value must be a positive integer")
    return bin(number).count("1")
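# Example: 25 is 0b11001, so get_set_bits_count(25) == 3.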
if __name__ == "__main__":
import doctest
doctest.testmod()
| 266
|
import torch
from diffusers import StableDiffusionPipeline
model_id = "path-to-your-trained-model"
pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to("cuda")

prompt = "A photo of sks dog in a bucket"
image = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0]
image.save("dog-bucket.png")
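# Optional follow-up (standard diffusers usage, not taken from this file): pass a
# seeded generator to make the sample reproducible, e.g.
#   generator = torch.Generator("cuda").manual_seed(0)
#   image = pipe(prompt, generator=generator).images[0]
# On CPU-only machines, drop `torch_dtype=torch.float16` and the `.to("cuda")` call.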
| 266
| 1
|
import inspect
import unittest
import numpy as np
from transformers import ViTConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel
class FlaxViTModelTester(unittest.TestCase):
"""simple docstring"""
    def __init__(self, parent, batch_size=13, image_size=30, patch_size=2, num_channels=3, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range

        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = ViTConfig(image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range)
        return config, pixel_values

    def create_and_check_model(self, config, pixel_values):
        model = FlaxViTModel(config=config)
        result = model(pixel_values)
        # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token)
        image_size = (self.image_size, self.image_size)
        patch_size = (self.patch_size, self.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, num_patches + 1, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values):
        config.num_labels = self.type_sequence_label_size
        model = FlaxViTForImageClassification(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = FlaxViTForImageClassification(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_flax
class FlaxViTModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else ()

    def setUp(self) -> None:
        self.model_tester = FlaxViTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.__call__)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(pixel_values, **kwargs):
                    return model(pixel_values=pixel_values, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("google/vit-base-patch16-224")
            outputs = model(np.ones((1, 3, 224, 224)))
            self.assertIsNotNone(outputs)
| 326
|
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('''TEST_SAGEMAKER''' , '''False''' ) ) is not True , reason='''Skipping test because should only be run when releasing minor transformers version''' , )
@pytest.mark.usefixtures('''sm_env''' )
@parameterized_class(
[
{
'''framework''': '''pytorch''',
'''script''': '''run_glue.py''',
'''model_name_or_path''': '''distilbert-base-cased''',
'''instance_type''': '''ml.g4dn.xlarge''',
'''results''': {'''train_runtime''': 6_5_0, '''eval_accuracy''': 0.6, '''eval_loss''': 0.9},
},
{
'''framework''': '''tensorflow''',
'''script''': '''run_tf.py''',
'''model_name_or_path''': '''distilbert-base-cased''',
'''instance_type''': '''ml.g4dn.xlarge''',
'''results''': {'''train_runtime''': 6_0_0, '''eval_accuracy''': 0.3, '''eval_loss''': 0.9},
},
] )
class SingleNodeTest(unittest.TestCase):
    def setUp(self):
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(),
                encoding="utf-8",
                check=True,
            )
        assert hasattr(self, "env")

    def create_estimator(self, instance_count=1):
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=f"{self.env.base_job_name}-single",
            instance_count=instance_count,
            instance_type=self.instance_type,
            debugger_hook_config=False,
            hyperparameters={**self.env.hyperparameters, "model_name_or_path": self.model_name_or_path},
            metric_definitions=self.env.metric_definitions,
            py_version="py36",
        )

    def save_results_as_csv(self, job_name):
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")

    def test_glue(self):
        estimator = self.create_estimator()
        # run training
        estimator.fit()
        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()
        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999)
        )
        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)
        # dump tests result into json file to share in PR
        with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
| 273
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_bloom': ['BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BloomConfig', 'BloomOnnxConfig'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_bloom_fast"] = ['BloomTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bloom"] = [
'BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST',
'BloomForCausalLM',
'BloomModel',
'BloomPreTrainedModel',
'BloomForSequenceClassification',
'BloomForTokenClassification',
'BloomForQuestionAnswering',
]
if TYPE_CHECKING:
from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bloom_fast import BloomTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bloom import (
BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
BloomForCausalLM,
BloomForQuestionAnswering,
BloomForSequenceClassification,
BloomForTokenClassification,
BloomModel,
BloomPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 363
|
"""simple docstring"""
from math import factorial
def binomial_distribution(successes: int, trials: int, prob: float) -> float:
    """Return the probability of exactly `successes` successes in `trials`
    independent Bernoulli trials, each with success probability `prob`."""
    if successes > trials:
        raise ValueError("""successes must be lower or equal to trials""")
    if trials < 0 or successes < 0:
        raise ValueError("""the function is defined for non-negative integers""")
    if not isinstance(successes, int) or not isinstance(trials, int):
        raise ValueError("""the function is defined for non-negative integers""")
    if not 0 < prob < 1:
        raise ValueError("""prob has to be in range of 1 - 0""")
    probability = (prob**successes) * ((1 - prob) ** (trials - successes))
    # Calculate the binomial coefficient: n! / k!(n-k)!
    coefficient = float(factorial(trials))
    coefficient /= factorial(successes) * factorial(trials - successes)
    return probability * coefficient
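# Sanity check for the __main__ block below:
# binomial_distribution(2, 4, 0.75) = C(4, 2) * 0.75**2 * 0.25**2
#                                   = 6 * 0.5625 * 0.0625 = 0.2109375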
if __name__ == "__main__":
from doctest import testmod
testmod()
    print('Probability of 2 successes out of 4 trials')
print('with probability of 0.75 is:', end=' ')
print(binomial_distribution(2, 4, 0.75))
| 324
| 0
|
from __future__ import annotations
def maximum_non_adjacent_sum(nums: list[int]) -> int:
    """Return the maximum sum over subsets of `nums` containing no two adjacent elements."""
    if not nums:
        return 0
    max_including = nums[0]
    max_excluding = 0
    for num in nums[1:]:
        max_including, max_excluding = (
            max_excluding + num,
            max(max_including, max_excluding),
        )
    return max(max_including, max_excluding)
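# Example: for [1, 2, 3, 4, 5, 6, 7] the optimal non-adjacent picks are
# 1 + 3 + 5 + 7 = 16, i.e. maximum_non_adjacent_sum([1, 2, 3, 4, 5, 6, 7]) == 16.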
if __name__ == "__main__":
import doctest
doctest.testmod()
| 259
|
"""simple docstring"""
import math
def jump_search(arr: list, x: int) -> int:
    """Search sorted `arr` for `x` by jumping in sqrt(n)-sized blocks, then scanning linearly."""
    n = len(arr)
    step = int(math.floor(math.sqrt(n)))
    prev = 0
    while arr[min(step, n) - 1] < x:
        prev = step
        step += int(math.floor(math.sqrt(n)))
        if prev >= n:
            return -1

    while arr[prev] < x:
        prev = prev + 1
        if prev == min(step, n):
            return -1
    if arr[prev] == x:
        return prev
    return -1
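# The floor(sqrt(n)) block size gives O(sqrt(n)) comparisons on a sorted list;
# e.g. jump_search([0, 1, 3, 5, 8, 13], 8) == 4.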
if __name__ == "__main__":
    user_input = input("""Enter numbers separated by a comma:\n""").strip()
    arr = [int(item) for item in user_input.split(""",""")]
    x = int(input("""Enter the number to be searched:\n"""))
    res = jump_search(arr, x)
if res == -1:
print("""Number not found!""")
else:
print(f"""Number {x} is at index {res}""")
| 242
| 0
|
'''simple docstring'''
import math
import time
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class QuestionAnsweringTrainer(Trainer):
    def __init__(self, *args, eval_examples=None, post_process_function=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function
    def evaluate(self, eval_dataset=None, eval_examples=None, ignore_keys=None, metric_key_prefix: str = "eval"):
        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        start_time = time.time()
        try:
            output = eval_loop(
                eval_dataloader,
                description='Evaluation',
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f'{metric_key_prefix}_jit_compilation_time' in output.metrics:
            start_time += output.metrics[f'{metric_key_prefix}_jit_compilation_time']
        output.metrics.update(
            speed_metrics(metric_key_prefix, start_time, num_samples=output.num_samples, num_steps=math.ceil(output.num_samples / total_batch_size))
        )
        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node write the results by default
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output.predictions)
            metrics = self.compute_metrics(eval_preds)
            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f'{metric_key_prefix}_'):
                    metrics[f'{metric_key_prefix}_{key}'] = metrics.pop(key)
            metrics.update(output.metrics)
        else:
            metrics = output.metrics
        if self.args.should_log:
            # Only the main node log the results by default
            self.log(metrics)
        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())
        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics
    def predict(self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test"):
        predict_dataloader = self.get_test_dataloader(predict_dataset)
        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        start_time = time.time()
        try:
            output = eval_loop(
                predict_dataloader,
                description='Prediction',
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f'{metric_key_prefix}_jit_compilation_time' in output.metrics:
            start_time += output.metrics[f'{metric_key_prefix}_jit_compilation_time']
        output.metrics.update(
            speed_metrics(metric_key_prefix, start_time, num_samples=output.num_samples, num_steps=math.ceil(output.num_samples / total_batch_size))
        )
        if self.post_process_function is None or self.compute_metrics is None:
            return output
        predictions = self.post_process_function(predict_examples, predict_dataset, output.predictions, 'predict')
        metrics = self.compute_metrics(predictions)
        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f'{metric_key_prefix}_'):
                metrics[f'{metric_key_prefix}_{key}'] = metrics.pop(key)
        metrics.update(output.metrics)
        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)
| 160
|
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
logger = logging.get_logger(__name__)

MARIAN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Helsinki-NLP/opus-mt-en-de": "https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/config.json",
    # See all Marian models at https://huggingface.co/models?filter=marian
}
class MarianConfig(PretrainedConfig):
    model_type = "marian"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(self, vocab_size=58101, decoder_vocab_size=None, max_position_embeddings=1024, encoder_layers=12, encoder_ffn_dim=4096, encoder_attention_heads=16, decoder_layers=12, decoder_ffn_dim=4096, decoder_attention_heads=16, encoder_layerdrop=0.0, decoder_layerdrop=0.0, use_cache=True, is_encoder_decoder=True, activation_function="gelu", d_model=1024, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, decoder_start_token_id=58100, scale_embedding=False, pad_token_id=58100, eos_token_id=0, forced_eos_token_id=0, share_encoder_decoder_embeddings=True, **kwargs):
        self.vocab_size = vocab_size
        self.decoder_vocab_size = decoder_vocab_size or vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.share_encoder_decoder_embeddings = share_encoder_decoder_embeddings
        super().__init__(
            pad_token_id=pad_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id, forced_eos_token_id=forced_eos_token_id, **kwargs
        )
class MarianOnnxConfig(OnnxSeq2SeqConfigWithPast):
@property
# Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.inputs
def lowerCamelCase ( self ):
"""simple docstring"""
if self.task in ["default", "seq2seq-lm"]:
_snake_case = OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
] )
if self.use_past:
_snake_case = {0: 'batch'}
_snake_case = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
else:
_snake_case = {0: 'batch', 1: 'decoder_sequence'}
_snake_case = {0: 'batch', 1: 'decoder_sequence'}
if self.use_past:
self.fill_with_past_key_values_(lowerCAmelCase_ , direction='inputs' )
elif self.task == "causal-lm":
# TODO: figure this case out.
_snake_case = OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
] )
if self.use_past:
_snake_case , _snake_case = self.num_layers
for i in range(lowerCAmelCase_ ):
_snake_case = {0: 'batch', 2: 'past_sequence + sequence'}
_snake_case = {0: 'batch', 2: 'past_sequence + sequence'}
else:
_snake_case = OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
('decoder_input_ids', {0: 'batch', 1: 'decoder_sequence'}),
('decoder_attention_mask', {0: 'batch', 1: 'decoder_sequence'}),
] )
return common_inputs
@property
# Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs
def lowerCamelCase ( self ):
"""simple docstring"""
if self.task in ["default", "seq2seq-lm"]:
_snake_case = super().outputs
else:
_snake_case = super(lowerCAmelCase_ , self ).outputs
if self.use_past:
_snake_case , _snake_case = self.num_layers
for i in range(lowerCAmelCase_ ):
_snake_case = {0: 'batch', 2: 'past_sequence + sequence'}
_snake_case = {0: 'batch', 2: 'past_sequence + sequence'}
return common_outputs
def lowerCamelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = -1 , lowerCAmelCase_ = -1 , lowerCAmelCase_ = False , lowerCAmelCase_ = None , ):
"""simple docstring"""
_snake_case = self._generate_dummy_inputs_for_encoder_and_decoder(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
# Generate decoder inputs
_snake_case = seq_length if not self.use_past else 1
_snake_case = self._generate_dummy_inputs_for_encoder_and_decoder(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
_snake_case = {F'decoder_{name}': tensor for name, tensor in decoder_inputs.items()}
_snake_case = dict(**lowerCAmelCase_ , **lowerCAmelCase_ )
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
else:
import torch
_snake_case , _snake_case = common_inputs['input_ids'].shape
_snake_case = common_inputs['decoder_input_ids'].shape[1]
_snake_case , _snake_case = self.num_attention_heads
_snake_case = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
_snake_case = decoder_seq_length + 3
_snake_case = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
_snake_case = torch.cat(
[common_inputs['decoder_attention_mask'], torch.ones(lowerCAmelCase_ , lowerCAmelCase_ )] , dim=1 )
_snake_case = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
_snake_case , _snake_case = self.num_layers
_snake_case = min(lowerCAmelCase_ , lowerCAmelCase_ )
_snake_case = max(lowerCAmelCase_ , lowerCAmelCase_ ) - min_num_layers
_snake_case = 'encoder' if num_encoder_layers > num_decoder_layers else 'decoder'
for _ in range(lowerCAmelCase_ ):
common_inputs["past_key_values"].append(
(
torch.zeros(lowerCAmelCase_ ),
torch.zeros(lowerCAmelCase_ ),
torch.zeros(lowerCAmelCase_ ),
torch.zeros(lowerCAmelCase_ ),
) )
# TODO: test this.
_snake_case = encoder_shape if remaining_side_name == 'encoder' else decoder_shape
for _ in range(lowerCAmelCase_ , lowerCAmelCase_ ):
common_inputs["past_key_values"].append((torch.zeros(lowerCAmelCase_ ), torch.zeros(lowerCAmelCase_ )) )
return common_inputs
def lowerCamelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = -1 , lowerCAmelCase_ = -1 , lowerCAmelCase_ = False , lowerCAmelCase_ = None , ):
"""simple docstring"""
_snake_case = self._generate_dummy_inputs_for_encoder_and_decoder(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
else:
import torch
_snake_case , _snake_case = common_inputs['input_ids'].shape
# Not using the same length for past_key_values
_snake_case = seqlen + 2
_snake_case , _snake_case = self.num_layers
_snake_case , _snake_case = self.num_attention_heads
_snake_case = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
_snake_case = common_inputs['attention_mask'].dtype
_snake_case = torch.cat(
[common_inputs['attention_mask'], torch.ones(lowerCAmelCase_ , lowerCAmelCase_ , dtype=lowerCAmelCase_ )] , dim=1 )
_snake_case = [
(torch.zeros(lowerCAmelCase_ ), torch.zeros(lowerCAmelCase_ )) for _ in range(lowerCAmelCase_ )
]
return common_inputs
def lowerCamelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = -1 , lowerCAmelCase_ = -1 , lowerCAmelCase_ = False , lowerCAmelCase_ = None , ):
"""simple docstring"""
_snake_case = compute_effective_axis_dimension(
lowerCAmelCase_ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
_snake_case = tokenizer.num_special_tokens_to_add(lowerCAmelCase_ )
_snake_case = compute_effective_axis_dimension(
lowerCAmelCase_ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=lowerCAmelCase_ )
# Generate dummy inputs according to compute batch and sequence
_snake_case = [' '.join([tokenizer.unk_token] ) * seq_length] * batch_size
_snake_case = dict(tokenizer(lowerCAmelCase_ , return_tensors=lowerCAmelCase_ ) )
return common_inputs
def lowerCamelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = -1 , lowerCAmelCase_ = -1 , lowerCAmelCase_ = False , lowerCAmelCase_ = None , ):
"""simple docstring"""
if self.task in ["default", "seq2seq-lm"]:
_snake_case = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
lowerCAmelCase_ , batch_size=lowerCAmelCase_ , seq_length=lowerCAmelCase_ , is_pair=lowerCAmelCase_ , framework=lowerCAmelCase_ )
else:
_snake_case = self._generate_dummy_inputs_for_causal_lm(
lowerCAmelCase_ , batch_size=lowerCAmelCase_ , seq_length=lowerCAmelCase_ , is_pair=lowerCAmelCase_ , framework=lowerCAmelCase_ )
return common_inputs
def lowerCamelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
if self.task in ["default", "seq2seq-lm"]:
_snake_case = super()._flatten_past_key_values_(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
else:
_snake_case = super(lowerCAmelCase_ , self )._flatten_past_key_values_(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
@property
def lowerCamelCase ( self ):
"""simple docstring"""
return 1E-4
| 160
| 1
|
"""simple docstring"""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"configuration_efficientnet": [
"EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP",
"EfficientNetConfig",
"EfficientNetOnnxConfig",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_efficientnet"] = ["EfficientNetImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_efficientnet"] = [
"EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"EfficientNetForImageClassification",
"EfficientNetModel",
"EfficientNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_efficientnet import (
EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
EfficientNetConfig,
EfficientNetOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientnet import EfficientNetImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientnet import (
EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientNetForImageClassification,
EfficientNetModel,
EfficientNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 136
|
"""simple docstring"""
def neville_interpolate(x_points: list, y_points: list, x0: int) -> list:
    """Interpolate and evaluate the polynomial through (x_points, y_points) at `x0`
    using Neville's iterated interpolation; returns [value, table]."""
    n = len(x_points)
    q = [[0] * n for i in range(n)]
    for i in range(n):
        q[i][1] = y_points[i]

    for i in range(2, n):
        for j in range(i, n):
            q[j][i] = (
                (x0 - x_points[j - i + 1]) * q[j][i - 1]
                - (x0 - x_points[j]) * q[j - 1][i - 1]
            ) / (x_points[j] - x_points[j - i + 1])

    return [q[n - 1][n - 1], q]
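# The first returned element is the interpolated value; e.g. for the linear data
# y = x + 5, neville_interpolate([1, 2, 3, 4, 5], [6, 7, 8, 9, 10], 5)[0] == 10.0.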
if __name__ == "__main__":
import doctest
doctest.testmod()
| 136
| 1
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'microsoft/resnet-50': 'https://huggingface.co/microsoft/resnet-50/blob/main/config.json',
}


class ResNetConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = 'resnet'
    layer_types = ['basic', 'bottleneck']

    def __init__(self, num_channels=3, embedding_size=64, hidden_sizes=[256, 512, 1024, 2048], depths=[3, 4, 6, 3], layer_type="bottleneck", hidden_act="relu", downsample_in_first_stage=False, out_features=None, out_indices=None, **kwargs):
        super().__init__(**kwargs)
        if layer_type not in self.layer_types:
            raise ValueError(f"""layer_type={layer_type} is not one of {','.join(self.layer_types)}""")
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.downsample_in_first_stage = downsample_in_first_stage
        self.stage_names = ['stem'] + [f"""stage{idx}""" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )


class ResNetOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse('1.11')

    @property
    def inputs(self) -> Mapping:
        return OrderedDict(
            [
                ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1E-3
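# Example: the defaults above (depths=[3, 4, 6, 3] with "bottleneck" layers)
# describe a ResNet-50; passing e.g. out_features=["stage4"] exposes only the
# final stage when the config is consumed as a backbone.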
| 238
|
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class DPMSolverSinglestepSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DPMSolverSinglestepScheduler,)
    forward_default_kwargs = (('num_inference_steps', 25),)
    def get_scheduler_config(self, **kwargs):
        config = {
            'num_train_timesteps': 1000,
            'beta_start': 0.0001,
            'beta_end': 0.02,
            'beta_schedule': 'linear',
            'solver_order': 2,
            'prediction_type': 'epsilon',
            'thresholding': False,
            'sample_max_value': 1.0,
            'algorithm_type': 'dpmsolver++',
            'solver_type': 'midpoint',
            'lambda_min_clipped': -float('inf'),
            'variance_type': None,
        }
        config.update(**kwargs)
        return config
def a (self : str , a__ : Any=0 , **a__ : Tuple ):
"""simple docstring"""
__snake_case = dict(self.forward_default_kwargs )
__snake_case = kwargs.pop('''num_inference_steps''' , a__ )
__snake_case = self.dummy_sample
__snake_case = 0.1 * sample
__snake_case = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
for scheduler_class in self.scheduler_classes:
__snake_case = self.get_scheduler_config(**a__ )
__snake_case = scheduler_class(**a__ )
scheduler.set_timesteps(a__ )
# copy over dummy past residuals
__snake_case = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(a__ )
__snake_case = scheduler_class.from_pretrained(a__ )
new_scheduler.set_timesteps(a__ )
# copy over dummy past residuals
__snake_case = dummy_past_residuals[: new_scheduler.config.solver_order]
__snake_case , __snake_case = sample, sample
for t in range(a__ , time_step + scheduler.config.solver_order + 1 ):
__snake_case = scheduler.step(a__ , a__ , a__ , **a__ ).prev_sample
__snake_case = new_scheduler.step(a__ , a__ , a__ , **a__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def a (self : Union[str, Any] ):
"""simple docstring"""
pass
def a (self : List[Any] , a__ : Dict=0 , **a__ : List[str] ):
"""simple docstring"""
__snake_case = dict(self.forward_default_kwargs )
__snake_case = kwargs.pop('''num_inference_steps''' , a__ )
__snake_case = self.dummy_sample
__snake_case = 0.1 * sample
__snake_case = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
for scheduler_class in self.scheduler_classes:
__snake_case = self.get_scheduler_config()
__snake_case = scheduler_class(**a__ )
scheduler.set_timesteps(a__ )
# copy over dummy past residuals (must be after setting timesteps)
__snake_case = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(a__ )
__snake_case = scheduler_class.from_pretrained(a__ )
# copy over dummy past residuals
new_scheduler.set_timesteps(a__ )
# copy over dummy past residual (must be after setting timesteps)
__snake_case = dummy_past_residuals[: new_scheduler.config.solver_order]
__snake_case = scheduler.step(a__ , a__ , a__ , **a__ ).prev_sample
__snake_case = new_scheduler.step(a__ , a__ , a__ , **a__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def a (self : int , a__ : Tuple=None , **a__ : List[str] ):
"""simple docstring"""
if scheduler is None:
__snake_case = self.scheduler_classes[0]
__snake_case = self.get_scheduler_config(**a__ )
__snake_case = scheduler_class(**a__ )
__snake_case = self.scheduler_classes[0]
__snake_case = self.get_scheduler_config(**a__ )
__snake_case = scheduler_class(**a__ )
__snake_case = 10
__snake_case = self.dummy_model()
__snake_case = self.dummy_sample_deter
scheduler.set_timesteps(a__ )
for i, t in enumerate(scheduler.timesteps ):
__snake_case = model(a__ , a__ )
__snake_case = scheduler.step(a__ , a__ , a__ ).prev_sample
return sample
def a (self : str ):
"""simple docstring"""
__snake_case = DPMSolverSinglestepScheduler(**self.get_scheduler_config() )
__snake_case = 50
__snake_case = self.dummy_model()
__snake_case = self.dummy_sample_deter
scheduler.set_timesteps(a__ )
# make sure that the first t is uneven
for i, t in enumerate(scheduler.timesteps[3:] ):
__snake_case = model(a__ , a__ )
__snake_case = scheduler.step(a__ , a__ , a__ ).prev_sample
__snake_case = torch.mean(torch.abs(a__ ) )
assert abs(result_mean.item() - 0.2_5_7_4 ) < 1E-3
def a (self : int ):
"""simple docstring"""
for timesteps in [25, 50, 100, 999, 1000]:
self.check_over_configs(num_train_timesteps=a__ )
def a (self : List[str] ):
"""simple docstring"""
__snake_case = DPMSolverSinglestepScheduler(**self.get_scheduler_config() )
__snake_case = self.full_loop(scheduler=a__ )
__snake_case = torch.mean(torch.abs(a__ ) )
assert abs(result_mean.item() - 0.2_7_9_1 ) < 1E-3
__snake_case = DEISMultistepScheduler.from_config(scheduler.config )
__snake_case = DPMSolverMultistepScheduler.from_config(scheduler.config )
__snake_case = UniPCMultistepScheduler.from_config(scheduler.config )
__snake_case = DPMSolverSinglestepScheduler.from_config(scheduler.config )
__snake_case = self.full_loop(scheduler=a__ )
__snake_case = torch.mean(torch.abs(a__ ) )
assert abs(result_mean.item() - 0.2_7_9_1 ) < 1E-3
def a (self : List[str] ):
"""simple docstring"""
self.check_over_configs(thresholding=a__ )
for order in [1, 2, 3]:
for solver_type in ["midpoint", "heun"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=a__ , prediction_type=a__ , sample_max_value=a__ , algorithm_type='''dpmsolver++''' , solver_order=a__ , solver_type=a__ , )
def a (self : Union[str, Any] ):
"""simple docstring"""
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=a__ )
def a (self : Union[str, Any] ):
"""simple docstring"""
for algorithm_type in ["dpmsolver", "dpmsolver++"]:
for solver_type in ["midpoint", "heun"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=a__ , solver_type=a__ , prediction_type=a__ , algorithm_type=a__ , )
__snake_case = self.full_loop(
solver_order=a__ , solver_type=a__ , prediction_type=a__ , algorithm_type=a__ , )
assert not torch.isnan(a__ ).any(), "Samples have nan numbers"
def a (self : List[str] ):
"""simple docstring"""
self.check_over_configs(lower_order_final=a__ )
self.check_over_configs(lower_order_final=a__ )
def a (self : Optional[Any] ):
"""simple docstring"""
self.check_over_configs(lambda_min_clipped=-float('''inf''' ) )
self.check_over_configs(lambda_min_clipped=-5.1 )
def a (self : Tuple ):
"""simple docstring"""
self.check_over_configs(variance_type=a__ )
self.check_over_configs(variance_type='''learned_range''' )
def a (self : int ):
"""simple docstring"""
for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
self.check_over_forward(num_inference_steps=a__ , time_step=0 )
def a (self : Optional[Any] ):
"""simple docstring"""
__snake_case = self.full_loop()
__snake_case = torch.mean(torch.abs(a__ ) )
assert abs(result_mean.item() - 0.2_7_9_1 ) < 1E-3
def a (self : int ):
"""simple docstring"""
__snake_case = self.full_loop(use_karras_sigmas=a__ )
__snake_case = torch.mean(torch.abs(a__ ) )
assert abs(result_mean.item() - 0.2_2_4_8 ) < 1E-3
def a (self : Tuple ):
"""simple docstring"""
__snake_case = self.full_loop(prediction_type='''v_prediction''' )
__snake_case = torch.mean(torch.abs(a__ ) )
assert abs(result_mean.item() - 0.1_4_5_3 ) < 1E-3
def a (self : List[Any] ):
"""simple docstring"""
__snake_case = self.full_loop(prediction_type='''v_prediction''' , use_karras_sigmas=a__ )
__snake_case = torch.mean(torch.abs(a__ ) )
assert abs(result_mean.item() - 0.0_6_4_9 ) < 1E-3
def a (self : int ):
"""simple docstring"""
__snake_case = self.scheduler_classes[0]
__snake_case = self.get_scheduler_config(thresholding=a__ , dynamic_thresholding_ratio=0 )
__snake_case = scheduler_class(**a__ )
__snake_case = 10
__snake_case = self.dummy_model()
__snake_case = self.dummy_sample_deter.half()
scheduler.set_timesteps(a__ )
for i, t in enumerate(scheduler.timesteps ):
__snake_case = model(a__ , a__ )
__snake_case = scheduler.step(a__ , a__ , a__ ).prev_sample
assert sample.dtype == torch.floataa
| 238
| 1
|
'''simple docstring'''
import inspect
import os
import re
from transformers.configuration_utils import PretrainedConfig
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = "src/transformers"

# This is to make sure the transformers module imported is the one in the repo.
transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)

CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING

SPECIAL_CASES_TO_ALLOW = {
# used to compute the property `self.chunk_length`
"EncodecConfig": ["overlap"],
# used as `self.bert_model = BertModel(config, ...)`
"DPRConfig": True,
# not used in modeling files, but it's an important information
"FSMTConfig": ["langs"],
# used internally in the configuration class file
"GPTNeoConfig": ["attention_types"],
# used internally in the configuration class file
"EsmConfig": ["is_folding_model"],
# used during training (despite we don't have training script for these models yet)
"Mask2FormerConfig": ["ignore_value"],
# `ignore_value` used during training (despite we don't have training script for these models yet)
# `norm` used in conversion script (despite not using in the modeling file)
"OneFormerConfig": ["ignore_value", "norm"],
# used during preprocessing and collation, see `collating_graphormer.py`
"GraphormerConfig": ["spatial_pos_max"],
# used internally in the configuration class file
"T5Config": ["feed_forward_proj"],
# used internally in the configuration class file
# `tokenizer_class` get default value `T5Tokenizer` intentionally
"MT5Config": ["feed_forward_proj", "tokenizer_class"],
"UMT5Config": ["feed_forward_proj", "tokenizer_class"],
# used internally in the configuration class file
"LongT5Config": ["feed_forward_proj"],
# used internally in the configuration class file
"SwitchTransformersConfig": ["feed_forward_proj"],
# having default values other than `1e-5` - we can't fix them without breaking
"BioGptConfig": ["layer_norm_eps"],
# having default values other than `1e-5` - we can't fix them without breaking
"GLPNConfig": ["layer_norm_eps"],
# having default values other than `1e-5` - we can't fix them without breaking
"SegformerConfig": ["layer_norm_eps"],
# having default values other than `1e-5` - we can't fix them without breaking
"CvtConfig": ["layer_norm_eps"],
# having default values other than `1e-5` - we can't fix them without breaking
"PerceiverConfig": ["layer_norm_eps"],
# used internally to calculate the feature size
"InformerConfig": ["num_static_real_features", "num_time_features"],
# used internally to calculate the feature size
"TimeSeriesTransformerConfig": ["num_static_real_features", "num_time_features"],
# used internally to calculate the feature size
"AutoformerConfig": ["num_static_real_features", "num_time_features"],
# used internally to calculate `mlp_dim`
"SamVisionConfig": ["mlp_ratio"],
# For (head) training, but so far not implemented
"ClapAudioConfig": ["num_classes"],
# Not used, but providing useful information to users
"SpeechT5HifiGanConfig": ["sampling_rate"],
}
# TODO (ydshieh): Check the failing cases, try to fix them or move some cases to the above block once we are sure
SPECIAL_CASES_TO_ALLOW.update(
{
"CLIPSegConfig": True,
"DeformableDetrConfig": True,
"DetaConfig": True,
"DinatConfig": True,
"DonutSwinConfig": True,
"EfficientFormerConfig": True,
"FSMTConfig": True,
"JukeboxConfig": True,
"LayoutLMv2Config": True,
"MaskFormerSwinConfig": True,
"MT5Config": True,
"NatConfig": True,
"OneFormerConfig": True,
"PerceiverConfig": True,
"RagConfig": True,
"SpeechT5Config": True,
"SwinConfig": True,
"Swin2SRConfig": True,
"Swinv2Config": True,
"SwitchTransformersConfig": True,
"TableTransformerConfig": True,
"TapasConfig": True,
"TransfoXLConfig": True,
"UniSpeechConfig": True,
"UniSpeechSatConfig": True,
"WavLMConfig": True,
"WhisperConfig": True,
# TODO: @Arthur (for `alignment_head` and `alignment_layer`)
"JukeboxPriorConfig": True,
# TODO: @Younes (for `is_decoder`)
"Pix2StructTextConfig": True,
}
)
def check_attribute_being_used(config_class, attributes, default_value, source_strings):
    attribute_used = False
    for attribute in attributes:
        for modeling_source in source_strings:
            # check if we can find `config.xxx`, `getattr(config, "xxx", ...)` or `getattr(self.config, "xxx", ...)`
            if (
                f"config.{attribute}" in modeling_source
                or f'getattr(config, "{attribute}"' in modeling_source
                or f'getattr(self.config, "{attribute}"' in modeling_source
            ):
                attribute_used = True
            # Deal with multi-line cases
            elif (
                re.search(
                    rf'getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*"{attribute}"',
                    modeling_source,
                )
                is not None
            ):
                attribute_used = True
            # `SequenceSummary` is called with `SequenceSummary(config)`
            elif attribute in [
                "summary_type",
                "summary_use_proj",
                "summary_activation",
                "summary_last_dropout",
                "summary_proj_to_labels",
                "summary_first_dropout",
            ]:
                if "SequenceSummary" in modeling_source:
                    attribute_used = True
            if attribute_used:
                break
        if attribute_used:
            break

    # common and important attributes, even if they do not always appear in the modeling files
    attributes_to_allow = [
        "bos_index",
        "eos_index",
        "pad_index",
        "unk_index",
        "mask_index",
        "image_size",
        "use_cache",
        "out_features",
        "out_indices",
    ]
    attributes_used_in_generation = ["encoder_no_repeat_ngram_size"]

    # Special cases to be allowed
    case_allowed = True
    if not attribute_used:
        case_allowed = False
        for attribute in attributes:
            # Allow if the default value in the configuration class is different from the one in `PretrainedConfig`
            if attribute in ["is_encoder_decoder"] and default_value is True:
                case_allowed = True
            elif attribute in ["tie_word_embeddings"] and default_value is False:
                case_allowed = True
            # Allow cases without checking the default value in the configuration class
            elif attribute in attributes_to_allow + attributes_used_in_generation:
                case_allowed = True
            elif attribute.endswith("_token_id"):
                case_allowed = True
        # configuration class specific cases
        if not case_allowed:
            allowed_cases = SPECIAL_CASES_TO_ALLOW.get(config_class.__name__, [])
            case_allowed = allowed_cases is True or attribute in allowed_cases
    return attribute_used or case_allowed
def __snake_case( _lowerCAmelCase ) -> str:
snake_case__ : List[str] = dict(inspect.signature(config_class.__init__ ).parameters )
snake_case__ : Tuple = [x for x in list(signature.keys() ) if x not in ["""self""", """kwargs"""]]
snake_case__ : List[str] = [signature[param].default for param in parameter_names]
# If `attribute_map` exists, an attribute can have different names to be used in the modeling files, and as long
# as one variant is used, the test should pass
snake_case__ : str = {}
if len(config_class.attribute_map ) > 0:
snake_case__ : Dict = {v: k for k, v in config_class.attribute_map.items()}
# Get the path to modeling source files
snake_case__ : int = inspect.getsourcefile(_lowerCAmelCase )
snake_case__ : int = os.path.dirname(_lowerCAmelCase )
# Let's check against all frameworks: as long as one framework uses an attribute, we are good.
snake_case__ : List[str] = [os.path.join(_lowerCAmelCase , _lowerCAmelCase ) for fn in os.listdir(_lowerCAmelCase ) if fn.startswith("""modeling_""" )]
# Get the source code strings
snake_case__ : Dict = []
for path in modeling_paths:
if os.path.isfile(_lowerCAmelCase ):
with open(_lowerCAmelCase ) as fp:
modeling_sources.append(fp.read() )
snake_case__ : List[str] = []
for config_param, default_value in zip(_lowerCAmelCase , _lowerCAmelCase ):
# `attributes` here is all the variant names for `config_param`
snake_case__ : Tuple = [config_param]
# some configuration classes have non-empty `attribute_map`, and both names could be used in the
# corresponding modeling files. As long as one of them appears, it is fine.
if config_param in reversed_attribute_map:
attributes.append(reversed_attribute_map[config_param] )
if not check_attribute_being_used(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
unused_attributes.append(attributes[0] )
return sorted(_lowerCAmelCase )
def __snake_case( ) -> List[str]:
snake_case__ : str = {}
for _config_class in list(CONFIG_MAPPING.values() ):
# Skip deprecated models
if "models.deprecated" in _config_class.__module__:
continue
# Some config classes are not in `CONFIG_MAPPING` (e.g. `CLIPVisionConfig`, `Blip2VisionConfig`, etc.)
snake_case__ : List[Any] = [
cls
for name, cls in inspect.getmembers(
inspect.getmodule(_config_class ) , lambda _lowerCAmelCase : inspect.isclass(_lowerCAmelCase )
and issubclass(_lowerCAmelCase , _lowerCAmelCase )
and inspect.getmodule(_lowerCAmelCase ) == inspect.getmodule(_config_class ) , )
]
for config_class in config_classes_in_module:
snake_case__ : Union[str, Any] = check_config_attributes_being_used(_lowerCAmelCase )
if len(_lowerCAmelCase ) > 0:
snake_case__ : Union[str, Any] = unused_attributes
if len(_lowerCAmelCase ) > 0:
snake_case__ : str = """The following configuration classes contain unused attributes in the corresponding modeling files:\n"""
for name, attributes in configs_with_unused_attributes.items():
error += f"{name}: {attributes}\n"
raise ValueError(_lowerCAmelCase )
if __name__ == "__main__":
check_config_attributes()
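# Illustrative aside (not part of the original script; relies on the module's own
# `re` import): the multi-line `re.search` branch in `check_attribute_being_used`
# catches `getattr` calls that a code formatter wrapped across several lines,
# which the plain substring checks would miss.
_wrapped_call = 'hidden = getattr(\n    self.config,\n    "hidden_size",\n)'
_multiline_pattern = r'getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*"hidden_size"'
assert re.search(_multiline_pattern, _wrapped_call) is not None  # multi-line form is found
assert 'getattr(self.config, "hidden_size"' not in _wrapped_call  # the naive check misses it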
import numpy as np
def power_iteration(
    input_matrix: np.ndarray,
    vector: np.ndarray,
    error_tol: float = 1e-12,
    max_iterations: int = 100,
) -> tuple[float, np.ndarray]:
    """Return the dominant eigenvalue of `input_matrix` and its eigenvector via power iteration."""
    # Ensure the matrix is square.
    assert np.shape(input_matrix)[0] == np.shape(input_matrix)[1]
    # Ensure proper dimensionality.
    assert np.shape(input_matrix)[0] == np.shape(vector)[0]
    # Ensure inputs are either both complex or both real
    assert np.iscomplexobj(input_matrix) == np.iscomplexobj(vector)
    is_complex = np.iscomplexobj(input_matrix)
    if is_complex:
        # Ensure complex input_matrix is Hermitian
        assert np.array_equal(input_matrix, input_matrix.conj().T)

    # Set convergence to False. Will define convergence when we exceed max_iterations
    # or when we have small changes from one iteration to next.
    convergence = False
    lambda_previous = 0
    iterations = 0
    error = 1e12

    while not convergence:
        # Multiply matrix by the vector.
        w = np.dot(input_matrix, vector)
        # Normalize the resulting output vector.
        vector = w / np.linalg.norm(w)
        # Find the Rayleigh quotient
        # (faster than usual b/c we know vector is normalized already)
        vector_h = vector.conj().T if is_complex else vector.T
        lambda_ = np.dot(vector_h, np.dot(input_matrix, vector))

        # Check convergence.
        error = np.abs(lambda_ - lambda_previous) / lambda_
        iterations += 1

        if error <= error_tol or iterations >= max_iterations:
            convergence = True

        lambda_previous = lambda_

    if is_complex:
        lambda_ = np.real(lambda_)

    return lambda_, vector


def test_power_iteration() -> None:
    real_input_matrix = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]])
    real_vector = np.array([41, 4, 20])
    complex_input_matrix = real_input_matrix.astype(np.complex128)
    imag_matrix = np.triu(1j * complex_input_matrix, 1)
    complex_input_matrix += imag_matrix
    complex_input_matrix += -1 * imag_matrix.T
    complex_vector = np.array([41, 4, 20]).astype(np.complex128)

    for problem_type in ["real", "complex"]:
        if problem_type == "real":
            input_matrix = real_input_matrix
            vector = real_vector
        elif problem_type == "complex":
            input_matrix = complex_input_matrix
            vector = complex_vector

        # Our implementation.
        eigen_value, eigen_vector = power_iteration(input_matrix, vector)

        # Numpy implementation.
        # Get eigenvalues and eigenvectors using built-in numpy
        # eigh (eigh is used for symmetric or Hermitian matrices).
        eigen_values, eigen_vectors = np.linalg.eigh(input_matrix)
        # Last eigenvalue is the maximum one.
        eigen_value_max = eigen_values[-1]
        # Last column in this matrix is the eigenvector corresponding to the largest eigenvalue.
        eigen_vector_max = eigen_vectors[:, -1]

        # Check our implementation and numpy give close answers.
        assert np.abs(eigen_value - eigen_value_max) <= 1e-6
        # Take absolute values element-wise of each eigenvector,
        # as they are only unique up to a sign.
        assert np.linalg.norm(np.abs(eigen_vector) - np.abs(eigen_vector_max)) <= 1e-6


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    test_power_iteration()
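# A minimal extra usage sketch (illustrative, not in the original file): the
# dominant eigenvalue of [[2, 1], [1, 2]] is 3, with eigenvector (1, 1)/sqrt(2).
if __name__ == "__main__":
    demo_matrix = np.array([[2.0, 1.0], [1.0, 2.0]])
    demo_vector = np.array([1.0, 0.0])
    demo_value, demo_vector = power_iteration(demo_matrix, demo_vector)
    print(f"dominant eigenvalue ~= {demo_value:.6f}")  # ~3.000000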
"""simple docstring"""
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
logger = logging.get_logger(__name__)

FLAX_MODEL_MAPPING_NAMES = OrderedDict(
[
# Base model mapping
("albert", "FlaxAlbertModel"),
("bart", "FlaxBartModel"),
("beit", "FlaxBeitModel"),
("bert", "FlaxBertModel"),
("big_bird", "FlaxBigBirdModel"),
("blenderbot", "FlaxBlenderbotModel"),
("blenderbot-small", "FlaxBlenderbotSmallModel"),
("clip", "FlaxCLIPModel"),
("distilbert", "FlaxDistilBertModel"),
("electra", "FlaxElectraModel"),
("gpt-sw3", "FlaxGPT2Model"),
("gpt2", "FlaxGPT2Model"),
("gpt_neo", "FlaxGPTNeoModel"),
("gptj", "FlaxGPTJModel"),
("longt5", "FlaxLongT5Model"),
("marian", "FlaxMarianModel"),
("mbart", "FlaxMBartModel"),
("mt5", "FlaxMT5Model"),
("opt", "FlaxOPTModel"),
("pegasus", "FlaxPegasusModel"),
("regnet", "FlaxRegNetModel"),
("resnet", "FlaxResNetModel"),
("roberta", "FlaxRobertaModel"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormModel"),
("roformer", "FlaxRoFormerModel"),
("t5", "FlaxT5Model"),
("vision-text-dual-encoder", "FlaxVisionTextDualEncoderModel"),
("vit", "FlaxViTModel"),
("wav2vec2", "FlaxWav2Vec2Model"),
("whisper", "FlaxWhisperModel"),
("xglm", "FlaxXGLMModel"),
("xlm-roberta", "FlaxXLMRobertaModel"),
]
)
FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES = OrderedDict(
[
# Model for pre-training mapping
("albert", "FlaxAlbertForPreTraining"),
("bart", "FlaxBartForConditionalGeneration"),
("bert", "FlaxBertForPreTraining"),
("big_bird", "FlaxBigBirdForPreTraining"),
("electra", "FlaxElectraForPreTraining"),
("longt5", "FlaxLongT5ForConditionalGeneration"),
("mbart", "FlaxMBartForConditionalGeneration"),
("mt5", "FlaxMT5ForConditionalGeneration"),
("roberta", "FlaxRobertaForMaskedLM"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMaskedLM"),
("roformer", "FlaxRoFormerForMaskedLM"),
("t5", "FlaxT5ForConditionalGeneration"),
("wav2vec2", "FlaxWav2Vec2ForPreTraining"),
("whisper", "FlaxWhisperForConditionalGeneration"),
("xlm-roberta", "FlaxXLMRobertaForMaskedLM"),
]
)
FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Masked LM mapping
("albert", "FlaxAlbertForMaskedLM"),
("bart", "FlaxBartForConditionalGeneration"),
("bert", "FlaxBertForMaskedLM"),
("big_bird", "FlaxBigBirdForMaskedLM"),
("distilbert", "FlaxDistilBertForMaskedLM"),
("electra", "FlaxElectraForMaskedLM"),
("mbart", "FlaxMBartForConditionalGeneration"),
("roberta", "FlaxRobertaForMaskedLM"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMaskedLM"),
("roformer", "FlaxRoFormerForMaskedLM"),
("xlm-roberta", "FlaxXLMRobertaForMaskedLM"),
]
)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
("bart", "FlaxBartForConditionalGeneration"),
("blenderbot", "FlaxBlenderbotForConditionalGeneration"),
("blenderbot-small", "FlaxBlenderbotSmallForConditionalGeneration"),
("encoder-decoder", "FlaxEncoderDecoderModel"),
("longt5", "FlaxLongT5ForConditionalGeneration"),
("marian", "FlaxMarianMTModel"),
("mbart", "FlaxMBartForConditionalGeneration"),
("mt5", "FlaxMT5ForConditionalGeneration"),
("pegasus", "FlaxPegasusForConditionalGeneration"),
("t5", "FlaxT5ForConditionalGeneration"),
]
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
        # Model for Image Classification mapping
("beit", "FlaxBeitForImageClassification"),
("regnet", "FlaxRegNetForImageClassification"),
("resnet", "FlaxResNetForImageClassification"),
("vit", "FlaxViTForImageClassification"),
]
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES = OrderedDict(
[
("vision-encoder-decoder", "FlaxVisionEncoderDecoderModel"),
]
)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Causal LM mapping
("bart", "FlaxBartForCausalLM"),
("bert", "FlaxBertForCausalLM"),
("big_bird", "FlaxBigBirdForCausalLM"),
("electra", "FlaxElectraForCausalLM"),
("gpt-sw3", "FlaxGPT2LMHeadModel"),
("gpt2", "FlaxGPT2LMHeadModel"),
("gpt_neo", "FlaxGPTNeoForCausalLM"),
("gptj", "FlaxGPTJForCausalLM"),
("opt", "FlaxOPTForCausalLM"),
("roberta", "FlaxRobertaForCausalLM"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForCausalLM"),
("xglm", "FlaxXGLMForCausalLM"),
("xlm-roberta", "FlaxXLMRobertaForCausalLM"),
]
)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Sequence Classification mapping
("albert", "FlaxAlbertForSequenceClassification"),
("bart", "FlaxBartForSequenceClassification"),
("bert", "FlaxBertForSequenceClassification"),
("big_bird", "FlaxBigBirdForSequenceClassification"),
("distilbert", "FlaxDistilBertForSequenceClassification"),
("electra", "FlaxElectraForSequenceClassification"),
("mbart", "FlaxMBartForSequenceClassification"),
("roberta", "FlaxRobertaForSequenceClassification"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForSequenceClassification"),
("roformer", "FlaxRoFormerForSequenceClassification"),
("xlm-roberta", "FlaxXLMRobertaForSequenceClassification"),
]
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict(
[
# Model for Question Answering mapping
("albert", "FlaxAlbertForQuestionAnswering"),
("bart", "FlaxBartForQuestionAnswering"),
("bert", "FlaxBertForQuestionAnswering"),
("big_bird", "FlaxBigBirdForQuestionAnswering"),
("distilbert", "FlaxDistilBertForQuestionAnswering"),
("electra", "FlaxElectraForQuestionAnswering"),
("mbart", "FlaxMBartForQuestionAnswering"),
("roberta", "FlaxRobertaForQuestionAnswering"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForQuestionAnswering"),
("roformer", "FlaxRoFormerForQuestionAnswering"),
("xlm-roberta", "FlaxXLMRobertaForQuestionAnswering"),
]
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Token Classification mapping
("albert", "FlaxAlbertForTokenClassification"),
("bert", "FlaxBertForTokenClassification"),
("big_bird", "FlaxBigBirdForTokenClassification"),
("distilbert", "FlaxDistilBertForTokenClassification"),
("electra", "FlaxElectraForTokenClassification"),
("roberta", "FlaxRobertaForTokenClassification"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForTokenClassification"),
("roformer", "FlaxRoFormerForTokenClassification"),
("xlm-roberta", "FlaxXLMRobertaForTokenClassification"),
]
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES = OrderedDict(
[
# Model for Multiple Choice mapping
("albert", "FlaxAlbertForMultipleChoice"),
("bert", "FlaxBertForMultipleChoice"),
("big_bird", "FlaxBigBirdForMultipleChoice"),
("distilbert", "FlaxDistilBertForMultipleChoice"),
("electra", "FlaxElectraForMultipleChoice"),
("roberta", "FlaxRobertaForMultipleChoice"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMultipleChoice"),
("roformer", "FlaxRoFormerForMultipleChoice"),
("xlm-roberta", "FlaxXLMRobertaForMultipleChoice"),
]
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES = OrderedDict(
[
("bert", "FlaxBertForNextSentencePrediction"),
]
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES = OrderedDict(
[
("speech-encoder-decoder", "FlaxSpeechEncoderDecoderModel"),
("whisper", "FlaxWhisperForConditionalGeneration"),
]
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
("whisper", "FlaxWhisperForAudioClassification"),
]
)
FLAX_MODEL_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
FLAX_MODEL_FOR_PRETRAINING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
FLAX_MODEL_FOR_MASKED_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class FlaxAutoModel(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_MAPPING


FlaxAutoModel = auto_class_update(FlaxAutoModel)


class FlaxAutoModelForPreTraining(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_PRETRAINING_MAPPING


FlaxAutoModelForPreTraining = auto_class_update(FlaxAutoModelForPreTraining, head_doc="pretraining")


class FlaxAutoModelForCausalLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING


FlaxAutoModelForCausalLM = auto_class_update(FlaxAutoModelForCausalLM, head_doc="causal language modeling")


class FlaxAutoModelForMaskedLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MASKED_LM_MAPPING


FlaxAutoModelForMaskedLM = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="masked language modeling")


class FlaxAutoModelForSeq2SeqLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING


FlaxAutoModelForSeq2SeqLM = auto_class_update(
    FlaxAutoModelForSeq2SeqLM, head_doc="sequence-to-sequence language modeling", checkpoint_for_example="t5-base"
)


class FlaxAutoModelForSequenceClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING


FlaxAutoModelForSequenceClassification = auto_class_update(
    FlaxAutoModelForSequenceClassification, head_doc="sequence classification"
)


class FlaxAutoModelForQuestionAnswering(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING


FlaxAutoModelForQuestionAnswering = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="question answering")


class FlaxAutoModelForTokenClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING


FlaxAutoModelForTokenClassification = auto_class_update(
    FlaxAutoModelForTokenClassification, head_doc="token classification"
)


class FlaxAutoModelForMultipleChoice(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING


FlaxAutoModelForMultipleChoice = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="multiple choice")


class FlaxAutoModelForNextSentencePrediction(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING


FlaxAutoModelForNextSentencePrediction = auto_class_update(
    FlaxAutoModelForNextSentencePrediction, head_doc="next sentence prediction"
)


class FlaxAutoModelForImageClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING


FlaxAutoModelForImageClassification = auto_class_update(
    FlaxAutoModelForImageClassification, head_doc="image classification"
)


class FlaxAutoModelForVision2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING


FlaxAutoModelForVision2Seq = auto_class_update(FlaxAutoModelForVision2Seq, head_doc="vision-to-text modeling")


class FlaxAutoModelForSpeechSeq2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING


FlaxAutoModelForSpeechSeq2Seq = auto_class_update(
    FlaxAutoModelForSpeechSeq2Seq, head_doc="sequence-to-sequence speech-to-text modeling"
)
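# Illustrative usage (not part of this module; assumes `flax` is installed and
# downloads weights on the first call): the auto class resolves a checkpoint's
# config type to the matching Flax model class registered above.
if __name__ == "__main__":
    from transformers import FlaxAutoModel

    model = FlaxAutoModel.from_pretrained("bert-base-cased")
    print(type(model).__name__)  # FlaxBertModel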
"""simple docstring"""
import json
import os
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from requests.exceptions import HTTPError
from transformers.utils import (
CONFIG_NAME,
FLAX_WEIGHTS_NAME,
TF2_WEIGHTS_NAME,
TRANSFORMERS_CACHE,
WEIGHTS_NAME,
cached_file,
get_file_from_repo,
has_file,
)
SCREAMING_SNAKE_CASE__ = "hf-internal-testing/tiny-random-bert"
SCREAMING_SNAKE_CASE__ = os.path.join(TRANSFORMERS_CACHE, "models--hf-internal-testing--tiny-random-bert")
SCREAMING_SNAKE_CASE__ = "9b8c223d42b2188cb49d29af482996f9d0f3e5a6"
class lowerCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def snake_case ( self ):
"""simple docstring"""
snake_case = cached_file(lowerCAmelCase , lowerCAmelCase )
# Should have downloaded the file in here
self.assertTrue(os.path.isdir(lowerCAmelCase ) )
# Cache should contain at least those three subfolders:
for subfolder in ["blobs", "refs", "snapshots"]:
self.assertTrue(os.path.isdir(os.path.join(lowerCAmelCase , lowerCAmelCase ) ) )
with open(os.path.join(lowerCAmelCase , 'refs' , 'main' ) ) as f:
snake_case = f.read()
self.assertEqual(lowerCAmelCase , os.path.join(lowerCAmelCase , 'snapshots' , lowerCAmelCase , lowerCAmelCase ) )
self.assertTrue(os.path.isfile(lowerCAmelCase ) )
# File is cached at the same place the second time.
snake_case = cached_file(lowerCAmelCase , lowerCAmelCase )
self.assertEqual(lowerCAmelCase , lowerCAmelCase )
# Using a specific revision to test the full commit hash.
snake_case = cached_file(lowerCAmelCase , lowerCAmelCase , revision='9b8c223' )
self.assertEqual(lowerCAmelCase , os.path.join(lowerCAmelCase , 'snapshots' , lowerCAmelCase , lowerCAmelCase ) )
def snake_case ( self ):
"""simple docstring"""
with self.assertRaisesRegex(lowerCAmelCase , 'is not a valid model identifier' ):
snake_case = cached_file('tiny-random-bert' , lowerCAmelCase )
with self.assertRaisesRegex(lowerCAmelCase , 'is not a valid git identifier' ):
snake_case = cached_file(lowerCAmelCase , lowerCAmelCase , revision='aaaa' )
with self.assertRaisesRegex(lowerCAmelCase , 'does not appear to have a file named' ):
snake_case = cached_file(lowerCAmelCase , 'conf' )
def snake_case ( self ):
"""simple docstring"""
with self.assertRaisesRegex(lowerCAmelCase , 'does not appear to have a file named' ):
snake_case = cached_file(lowerCAmelCase , 'conf' )
with open(os.path.join(lowerCAmelCase , 'refs' , 'main' ) ) as f:
snake_case = f.read()
self.assertTrue(os.path.isfile(os.path.join(lowerCAmelCase , '.no_exist' , lowerCAmelCase , 'conf' ) ) )
snake_case = cached_file(lowerCAmelCase , 'conf' , _raise_exceptions_for_missing_entries=lowerCAmelCase )
self.assertIsNone(lowerCAmelCase )
snake_case = cached_file(lowerCAmelCase , 'conf' , local_files_only=lowerCAmelCase , _raise_exceptions_for_missing_entries=lowerCAmelCase )
self.assertIsNone(lowerCAmelCase )
snake_case = mock.Mock()
snake_case = 5_00
snake_case = {}
snake_case = HTTPError
snake_case = {}
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch('requests.Session.request' , return_value=lowerCAmelCase ) as mock_head:
snake_case = cached_file(lowerCAmelCase , 'conf' , _raise_exceptions_for_connection_errors=lowerCAmelCase )
self.assertIsNone(lowerCAmelCase )
# This check we did call the fake head request
mock_head.assert_called()
def snake_case ( self ):
"""simple docstring"""
self.assertTrue(has_file('hf-internal-testing/tiny-bert-pt-only' , lowerCAmelCase ) )
self.assertFalse(has_file('hf-internal-testing/tiny-bert-pt-only' , lowerCAmelCase ) )
self.assertFalse(has_file('hf-internal-testing/tiny-bert-pt-only' , lowerCAmelCase ) )
def snake_case ( self ):
"""simple docstring"""
self.assertIsNone(get_file_from_repo('bert-base-cased' , 'ahah.txt' ) )
# The function raises if the repository does not exist.
with self.assertRaisesRegex(lowerCAmelCase , 'is not a valid model identifier' ):
get_file_from_repo('bert-base-case' , lowerCAmelCase )
# The function raises if the revision does not exist.
with self.assertRaisesRegex(lowerCAmelCase , 'is not a valid git identifier' ):
get_file_from_repo('bert-base-cased' , lowerCAmelCase , revision='ahaha' )
snake_case = get_file_from_repo('bert-base-cased' , lowerCAmelCase )
# The name is the cached name which is not very easy to test, so instead we load the content.
snake_case = json.loads(open(lowerCAmelCase , 'r' ).read() )
self.assertEqual(config['hidden_size'] , 7_68 )
def snake_case ( self ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
snake_case = Path(lowerCAmelCase ) / 'a.txt'
filename.touch()
self.assertEqual(get_file_from_repo(lowerCAmelCase , 'a.txt' ) , str(lowerCAmelCase ) )
self.assertIsNone(get_file_from_repo(lowerCAmelCase , 'b.txt' ) )
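# Illustrative direct usage (not part of the test suite; needs network access on
# the first call): resolve a Hub file to its local cache path.
if __name__ == "__main__":
    print(cached_file(RANDOM_BERT, CONFIG_NAME))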
'''simple docstring'''
from __future__ import annotations
def print_distance(distance: list[float], src):
    print(f"Vertex\tShortest Distance from vertex {src}")
    for i, d in enumerate(distance):
        print(f"{i}\t\t{d}")


def check_negative_cycle(graph: list[dict[str, int]], distance: list[float], edge_count: int):
    for j in range(edge_count):
        u, v, w = (graph[j][k] for k in ["src", "dst", "weight"])
        if distance[u] != float("inf") and distance[u] + w < distance[v]:
            return True
    return False


def bellman_ford(graph: list[dict[str, int]], vertex_count: int, edge_count: int, src: int) -> list[float]:
    """Return the shortest distances from `src` to every vertex, or raise if a negative cycle exists."""
    distance = [float("inf")] * vertex_count
    distance[src] = 0.0

    for _ in range(vertex_count - 1):
        for j in range(edge_count):
            u, v, w = (graph[j][k] for k in ["src", "dst", "weight"])

            if distance[u] != float("inf") and distance[u] + w < distance[v]:
                distance[v] = distance[u] + w

    negative_cycle_exists = check_negative_cycle(graph, distance, edge_count)
    if negative_cycle_exists:
        raise Exception("Negative cycle found")

    return distance


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    V = int(input("Enter number of vertices: ").strip())
    E = int(input("Enter number of edges: ").strip())

    graph: list[dict[str, int]] = [{} for _ in range(E)]

    for i in range(E):
        print("Edge ", i + 1)
        src, dest, weight = (
            int(x)
            for x in input("Enter source, destination, weight: ").strip().split(" ")
        )
        graph[i] = {"src": src, "dst": dest, "weight": weight}

    source = int(input("\nEnter shortest path source:").strip())
    shortest_distance = bellman_ford(graph, V, E, source)
    print_distance(shortest_distance, 0)
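# A minimal non-interactive sketch (not part of the original script): on a
# hard-coded 3-vertex graph, the relaxed path 0 -> 1 -> 2 (total weight 3)
# beats the direct edge 0 -> 2 (weight 5).
def _demo_bellman_ford() -> None:
    demo_graph = [
        {"src": 0, "dst": 1, "weight": 2},
        {"src": 1, "dst": 2, "weight": 1},
        {"src": 0, "dst": 2, "weight": 5},
    ]
    assert bellman_ford(demo_graph, vertex_count=3, edge_count=3, src=0) == [0.0, 2.0, 3.0]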
import torch
from torch import nn
from transformers import CLIPPreTrainedModel, CLIPVisionModel
from ...models.attention import BasicTransformerBlock
from ...utils import logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class PaintByExampleImageEncoder(CLIPPreTrainedModel):
    def __init__(self, config, proj_size=768):
        super().__init__(config)
        self.proj_size = proj_size

        self.model = CLIPVisionModel(config)
        self.mapper = PaintByExampleMapper(config)
        self.final_layer_norm = nn.LayerNorm(config.hidden_size)
        self.proj_out = nn.Linear(config.hidden_size, self.proj_size)

        # uncondition for scaling
        self.uncond_vector = nn.Parameter(torch.randn((1, 1, self.proj_size)))

    def forward(self, pixel_values, return_uncond_vector=False):
        clip_output = self.model(pixel_values=pixel_values)
        latent_states = clip_output.pooler_output
        latent_states = self.mapper(latent_states[:, None])
        latent_states = self.final_layer_norm(latent_states)
        latent_states = self.proj_out(latent_states)
        if return_uncond_vector:
            return latent_states, self.uncond_vector

        return latent_states


class PaintByExampleMapper(nn.Module):
    def __init__(self, config):
        super().__init__()
        num_layers = (config.num_hidden_layers + 1) // 5
        hid_size = config.hidden_size
        num_heads = 1
        self.blocks = nn.ModuleList(
            [
                BasicTransformerBlock(hid_size, num_heads, hid_size // num_heads, activation_fn="gelu", attention_bias=True)
                for _ in range(num_layers)
            ]
        )

    def forward(self, hidden_states):
        for block in self.blocks:
            hidden_states = block(hidden_states)

        return hidden_states
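# Illustrative sketch (not part of the original file, and runnable only inside the
# diffusers package because of the relative imports above): the mapper depth is
# (config.num_hidden_layers + 1) // 5, so a 4-layer toy CLIP config yields 1 block,
# while a 24-layer CLIP ViT-L would yield 5.
if __name__ == "__main__":
    from transformers import CLIPVisionConfig

    tiny_config = CLIPVisionConfig(hidden_size=32, num_hidden_layers=4, num_attention_heads=4)
    print(len(PaintByExampleMapper(tiny_config).blocks))  # (4 + 1) // 5 = 1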
'''simple docstring'''
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
_CITATION = """\
@misc{wu2016googles,
      title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},
      author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey
              and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin
              Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto
              Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and
              Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes
              and Jeffrey Dean},
      year={2016},
      eprint={1609.08144},
      archivePrefix={arXiv},
      primaryClass={cs.CL}
}
"""
_DESCRIPTION = """\
The BLEU score has some undesirable properties when used for single
sentences, as it was designed to be a corpus measure. We therefore
use a slightly different score for our RL experiments which we call
the 'GLEU score'. For the GLEU score, we record all sub-sequences of
1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then
compute a recall, which is the ratio of the number of matching n-grams
to the number of total n-grams in the target (ground truth) sequence,
and a precision, which is the ratio of the number of matching n-grams
to the number of total n-grams in the generated output sequence. Then
GLEU score is simply the minimum of recall and precision. This GLEU
score's range is always between 0 (no matches) and 1 (all match) and
it is symmetrical when switching output and target. According to
our experiments, GLEU score correlates quite well with the BLEU
metric on a corpus level but does not have its drawbacks for our per
sentence reward objective.
"""
_KWARGS_DESCRIPTION = """\
Computes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.
Instead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching
tokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.

Args:
    predictions (list of str): list of translations to score.
        Each translation should be tokenized into a list of tokens.
    references (list of list of str): list of lists of references for each translation.
        Each reference should be tokenized into a list of tokens.
    min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.
    max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.

Returns:
    'google_bleu': google_bleu score

Examples:
    Example 1:
        >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
        ...         'ensures', 'that', 'the', 'rubber', 'duck', 'always',
        ...         'disobeys', 'the', 'commands', 'of', 'the', 'cat']
        >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
        ...          'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
        ...          'being', 'under', 'the', 'command', 'of', 'the', 'cat']

        >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
        ...         'interested', 'in', 'world', 'history']
        >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
        ...          'because', 'he', 'read', 'the', 'book']

        >>> list_of_references = [[ref1a], [ref2a]]
        >>> hypotheses = [hyp1, hyp2]
        >>> google_bleu = datasets.load_metric("google_bleu")
        >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
        >>> print(round(results["google_bleu"], 2))
        0.44

    Example 2:
        >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
        ...         'ensures', 'that', 'the', 'rubber', 'duck', 'always',
        ...         'disobeys', 'the', 'commands', 'of', 'the', 'cat']
        >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
        ...          'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
        ...          'being', 'under', 'the', 'command', 'of', 'the', 'cat']
        >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
        ...          'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
        ...          'heed', 'the', 'cat', 'commands']
        >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
        ...          'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
        ...          'of', 'the', 'cat']

        >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
        ...         'interested', 'in', 'world', 'history']
        >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
        ...          'because', 'he', 'read', 'the', 'book']

        >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
        >>> hypotheses = [hyp1, hyp2]
        >>> google_bleu = datasets.load_metric("google_bleu")
        >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
        >>> print(round(results["google_bleu"], 2))
        0.61

    Example 3 (continuing from Example 2's setup):
        >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)
        >>> print(round(results["google_bleu"], 2))
        0.53

    Example 4 (continuing from Example 2's setup):
        >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2, max_len=6)
        >>> print(round(results["google_bleu"], 2))
        0.4
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class GoogleBleu(datasets.Metric):
    def _info(self) -> MetricInfo:
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string", id="token"), id="sequence"),
                    "references": datasets.Sequence(
                        datasets.Sequence(datasets.Value("string", id="token"), id="sequence"), id="references"
                    ),
                }
            ),
        )

    def _compute(
        self,
        predictions: List[List[str]],
        references: List[List[List[str]]],
        min_len: int = 1,
        max_len: int = 4,
    ) -> Dict[str, float]:
        return {
            "google_bleu": gleu_score.corpus_gleu(
                list_of_references=references, hypotheses=predictions, min_len=min_len, max_len=max_len
            )
        }
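# Minimal sketch (illustrative, not part of the metric file): the underlying
# NLTK call that `_compute` wraps, on a toy hypothesis/reference pair.
if __name__ == "__main__":
    hypothesis = ["the", "cat", "sat", "on", "the", "mat"]
    reference = ["the", "cat", "is", "on", "the", "mat"]
    print(gleu_score.corpus_gleu(list_of_references=[[reference]], hypotheses=[hypothesis]))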
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNet2DModel
from ...schedulers import KarrasVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class KarrasVePipeline(DiffusionPipeline):
    unet: UNet2DModel
    scheduler: KarrasVeScheduler

    def __init__(self, unet: UNet2DModel, scheduler: KarrasVeScheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 50,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[Tuple, ImagePipelineOutput]:
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)

        model = self.unet

        # sample x_0 ~ N(0, sigma_0^2 * I)
        sample = randn_tensor(shape, generator=generator, device=self.device) * self.scheduler.init_noise_sigma

        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # here sigma_t == t_i from the paper
            sigma = self.scheduler.schedule[t]
            sigma_prev = self.scheduler.schedule[t - 1] if t > 0 else 0

            # 1. Select temporarily increased noise level sigma_hat
            # 2. Add new noise to move from sample_i to sample_hat
            sample_hat, sigma_hat = self.scheduler.add_noise_to_input(sample, sigma, generator=generator)

            # 3. Predict the noise residual given the noise magnitude `sigma_hat`
            # The model inputs and output are adjusted by following eq. (213) in [1].
            model_output = (sigma_hat / 2) * model((sample_hat + 1) / 2, sigma_hat / 2).sample

            # 4. Evaluate dx/dt at sigma_hat
            # 5. Take Euler step from sigma to sigma_prev
            step_output = self.scheduler.step(model_output, sigma_hat, sigma_prev, sample_hat)

            if sigma_prev != 0:
                # 6. Apply 2nd order correction
                # The model inputs and output are adjusted by following eq. (213) in [1].
                model_output = (sigma_prev / 2) * model((step_output.prev_sample + 1) / 2, sigma_prev / 2).sample
                step_output = self.scheduler.step_correct(
                    model_output,
                    sigma_hat,
                    sigma_prev,
                    sample_hat,
                    step_output.prev_sample,
                    step_output["derivative"],
                )
            sample = step_output.prev_sample

        sample = (sample / 2 + 0.5).clamp(0, 1)
        image = sample.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
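# Illustrative usage (not part of this module; the checkpoint name is an
# assumption and weights are downloaded on first use): sample one image.
if __name__ == "__main__":
    from diffusers import KarrasVePipeline, KarrasVeScheduler, UNet2DModel

    unet = UNet2DModel.from_pretrained("google/ncsnpp-celebahq-256")
    pipe = KarrasVePipeline(unet=unet, scheduler=KarrasVeScheduler())
    image = pipe(num_inference_steps=20).images[0]
    image.save("karras_ve_sample.png")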
import numpy as np
import torch
from torch.utils.data import Dataset
from utils import logger
class LmSeqsDataset(Dataset):
    """Custom Dataset wrapping language modeling sequences.

    Each sample is a (token_ids, length) pair.
    """

    def __init__(self, params, data):
        self.params = params

        self.token_ids = np.array(data)
        self.lengths = np.array([len(t) for t in data])

        self.check()
        self.remove_long_sequences()
        self.remove_empty_sequences()
        self.remove_unknown_sequences()
        self.check()
        self.print_statistics()

    def __getitem__(self, index):
        return (self.token_ids[index], self.lengths[index])

    def __len__(self):
        return len(self.lengths)

    def check(self):
        """Some sanity checks."""
        assert len(self.token_ids) == len(self.lengths)
        assert all(self.lengths[i] == len(self.token_ids[i]) for i in range(len(self.lengths)))

    def remove_long_sequences(self):
        """Sequences that are too long are split into chunks of at most max_model_input_size tokens."""
        max_len = self.params.max_model_input_size
        indices = self.lengths > max_len
        logger.info(f"Splitting {sum(indices)} too long sequences.")

        def divide_chunks(l, n):
            return [l[i : i + n] for i in range(0, len(l), n)]

        new_tok_ids = []
        new_lengths = []
        if self.params.mlm:
            cls_id, sep_id = self.params.special_tok_ids["cls_token"], self.params.special_tok_ids["sep_token"]
        else:
            cls_id, sep_id = self.params.special_tok_ids["bos_token"], self.params.special_tok_ids["eos_token"]

        for seq_, len_ in zip(self.token_ids, self.lengths):
            assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
            if len_ <= max_len:
                new_tok_ids.append(seq_)
                new_lengths.append(len_)
            else:
                sub_seqs = []
                for sub_s in divide_chunks(seq_, max_len - 2):
                    if sub_s[0] != cls_id:
                        sub_s = np.insert(sub_s, 0, cls_id)
                    if sub_s[-1] != sep_id:
                        sub_s = np.insert(sub_s, len(sub_s), sep_id)
                    assert len(sub_s) <= max_len
                    assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
                    sub_seqs.append(sub_s)

                new_tok_ids.extend(sub_seqs)
                new_lengths.extend([len(l) for l in sub_seqs])

        self.token_ids = np.array(new_tok_ids)
        self.lengths = np.array(new_lengths)

    def remove_empty_sequences(self):
        """Too short sequences are simply removed. This could be tuned."""
        init_size = len(self)
        indices = self.lengths > 11
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self)
        logger.info(f"Remove {init_size - new_size} too short (<=11 tokens) sequences.")

    def remove_unknown_sequences(self):
        """Remove sequences with a (too) high level of unknown tokens."""
        if "unk_token" not in self.params.special_tok_ids:
            return
        else:
            unk_token_id = self.params.special_tok_ids["unk_token"]
        init_size = len(self)
        unk_occs = np.array([np.count_nonzero(a == unk_token_id) for a in self.token_ids])
        indices = (unk_occs / self.lengths) < 0.5
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self)
        logger.info(f"Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).")

    def print_statistics(self):
        """Print some statistics on the corpus. Only the master process logs."""
        if not self.params.is_master:
            return
        logger.info(f"{len(self)} sequences")
        # data_len = sum(self.lengths)
        # nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
        # logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')
        # unk_idx = self.params.special_tok_ids['unk_token']
        # nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
        # logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')

    def batch_sequences(self, batch):
        """Do the padding and transform into torch.tensor."""
        token_ids = [t[0] for t in batch]
        lengths = [t[1] for t in batch]
        assert len(token_ids) == len(lengths)

        # Max for paddings
        max_seq_len_ = max(lengths)

        # Pad token ids
        if self.params.mlm:
            pad_idx = self.params.special_tok_ids["pad_token"]
        else:
            pad_idx = self.params.special_tok_ids["unk_token"]
        tk_ = [list(t.astype(int)) + [pad_idx] * (max_seq_len_ - len(t)) for t in token_ids]
        assert len(tk_) == len(token_ids)
        assert all(len(t) == max_seq_len_ for t in tk_)

        tk_t = torch.tensor(tk_)  # (bs, max_seq_len_)
        lg_t = torch.tensor(lengths)  # (bs)
        return tk_t, lg_t
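# Minimal usage sketch (not part of the original file; run from the distillation
# example directory so that `utils.logger` resolves). Real scripts build `params`
# from argparse; a SimpleNamespace with the same fields is enough here.
if __name__ == "__main__":
    from types import SimpleNamespace

    demo_params = SimpleNamespace(
        max_model_input_size=16,
        mlm=True,
        special_tok_ids={"cls_token": 0, "sep_token": 1, "pad_token": 2},
        is_master=True,
    )
    demo_data = [[0] + [5] * 12 + [1], [0] + [7] * 12 + [1]]
    dataset = LmSeqsDataset(demo_params, demo_data)
    tokens, lengths = dataset.batch_sequences([dataset[0], dataset[1]])
    print(tokens.shape, lengths)  # torch.Size([2, 14]) tensor([14, 14])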
"""simple docstring"""
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import (
AutoProcessor,
BertTokenizerFast,
BlipImageProcessor,
        GPT2Tokenizer,
InstructBlipProcessor,
PreTrainedTokenizerFast,
)
@require_vision
class InstructBlipProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        image_processor = BlipImageProcessor()
        tokenizer = GPT2Tokenizer.from_pretrained("hf-internal-testing/tiny-random-GPT2Model")
        qformer_tokenizer = BertTokenizerFast.from_pretrained("hf-internal-testing/tiny-random-bert")

        processor = InstructBlipProcessor(image_processor, tokenizer, qformer_tokenizer)

        processor.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def get_qformer_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).qformer_tokenizer

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepare a list of PIL images."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = InstructBlipProcessor(
            tokenizer=self.get_tokenizer(),
            image_processor=self.get_image_processor(),
            qformer_tokenizer=self.get_qformer_tokenizer(),
        )
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = InstructBlipProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, PreTrainedTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, BlipImageProcessor)
        self.assertIsInstance(processor.qformer_tokenizer, PreTrainedTokenizerFast)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()

        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
        )

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()

        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
        )

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)

        encoded_tokens = tokenizer(input_str, return_token_type_ids=False)
        encoded_tokens_qformer = qformer_tokenizer(input_str, return_token_type_ids=False)

        for key in encoded_tokens.keys():
            self.assertListEqual(encoded_tokens[key], encoded_processor[key])

        for key in encoded_tokens_qformer.keys():
            self.assertListEqual(encoded_tokens_qformer[key], encoded_processor["qformer_" + key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()

        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
        )

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(
            list(inputs.keys()),
            ["input_ids", "attention_mask", "qformer_input_ids", "qformer_attention_mask", "pixel_values"],
        )

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()

        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
        )

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()

        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
        )

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(
            list(inputs.keys()),
            ["input_ids", "attention_mask", "qformer_input_ids", "qformer_attention_mask", "pixel_values"],
        )
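# Illustrative usage (not part of the test file; the checkpoint name is an
# assumption and the download is large): the same processor class served from a
# released InstructBLIP checkpoint.
if __name__ == "__main__":
    processor = InstructBlipProcessor.from_pretrained("Salesforce/instructblip-flan-t5-xl")
    print(type(processor.tokenizer).__name__, type(processor.qformer_tokenizer).__name__)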
'''simple docstring'''
import importlib.util
import json
import os
import warnings
from dataclasses import dataclass, field
import torch
from ..training_args import TrainingArguments
from ..utils import cached_property, is_sagemaker_dp_enabled, logging
logger = logging.get_logger(__name__)


def is_sagemaker_model_parallel_available():
    # Get the sagemaker specific mp parameters from smp_options variable.
    smp_options = os.getenv("SM_HP_MP_PARAMETERS", "{}")
    try:
        # Parse it and check the field "partitions" is included, it is required for model parallel.
        smp_options = json.loads(smp_options)
        if "partitions" not in smp_options:
            return False
    except json.JSONDecodeError:
        return False

    # Get the sagemaker specific framework parameters from mpi_options variable.
    mpi_options = os.getenv("SM_FRAMEWORK_PARAMS", "{}")
    try:
        # Parse it and check the field "sagemaker_mpi_enabled".
        mpi_options = json.loads(mpi_options)
        if not mpi_options.get("sagemaker_mpi_enabled", False):
            return False
    except json.JSONDecodeError:
        return False
    # Lastly, check if the `smdistributed` module is present.
    return importlib.util.find_spec("smdistributed") is not None


if is_sagemaker_model_parallel_available():
    import smdistributed.modelparallel.torch as smp

    smp.init()


@dataclass
class SageMakerTrainingArguments(TrainingArguments):
    mp_parameters: str = field(
        default="",
        metadata={"help": "Used by the SageMaker launcher to send mp-specific args. Ignored in SageMakerTrainer"},
    )

    def __post_init__(self):
        super().__post_init__()
        warnings.warn(
            "`SageMakerTrainingArguments` is deprecated and will be removed in v5 of Transformers. You can use "
            "`TrainingArguments` instead.",
            FutureWarning,
        )

    @cached_property
    def _setup_devices(self) -> "torch.device":
        logger.info("PyTorch: setting up devices")
        if torch.distributed.is_available() and torch.distributed.is_initialized() and self.local_rank == -1:
            logger.warning(
                "torch.distributed process group is initialized, but local_rank == -1. "
                "In order to use Torch DDP, launch your script with `python -m torch.distributed.launch"
            )
        if self.no_cuda:
            device = torch.device("cpu")
            self._n_gpu = 0
        elif is_sagemaker_model_parallel_available():
            local_rank = smp.local_rank()
            device = torch.device("cuda", local_rank)
            self._n_gpu = 1
        elif is_sagemaker_dp_enabled():
            import smdistributed.dataparallel.torch.torch_smddp  # noqa: F401

            torch.distributed.init_process_group(backend="smddp", timeout=self.ddp_timeout_delta)
            self.local_rank = int(os.getenv("SMDATAPARALLEL_LOCAL_RANK"))
            device = torch.device("cuda", self.local_rank)
            self._n_gpu = 1
        elif self.local_rank == -1:
            # if n_gpu is > 1 we'll use nn.DataParallel.
            # If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0`
            # Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will
            # trigger an error that a device index is missing. Index 0 takes into account the
            # GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0`
            # will use the first GPU in that env, i.e. GPU#1
            device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
            # Sometimes the line in the postinit has not been run before we end up here, so just checking we're not at
            # the default value.
            self._n_gpu = torch.cuda.device_count()
        else:
            # Here, we'll use torch.distributed.
            # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
            if not torch.distributed.is_initialized():
                torch.distributed.init_process_group(backend="nccl", timeout=self.ddp_timeout_delta)
            device = torch.device("cuda", self.local_rank)
            self._n_gpu = 1

        if device.type == "cuda":
            torch.cuda.set_device(device)

        return device

    @property
    def world_size(self):
        if is_sagemaker_model_parallel_available():
            return smp.dp_size()

        return super().world_size

    @property
    def place_model_on_device(self):
        return not is_sagemaker_model_parallel_available()

    @property
    def _no_sync_in_gradient_accumulation(self):
        return False
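# Illustrative aside (not part of the original module): the launcher-set
# environment variables that `is_sagemaker_model_parallel_available` inspects.
# With `smdistributed` installed this prints True; otherwise False.
if __name__ == "__main__":
    os.environ["SM_HP_MP_PARAMETERS"] = json.dumps({"partitions": 2})
    os.environ["SM_FRAMEWORK_PARAMS"] = json.dumps({"sagemaker_mpi_enabled": True})
    print(is_sagemaker_model_parallel_available())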
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_deformable_detr import DeformableDetrImageProcessor
logger = logging.get_logger(__name__)


class DeformableDetrFeatureExtractor(DeformableDetrImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DeformableDetrFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use DeformableDetrImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
'''simple docstring'''
from math import sqrt
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : int ) -> bool:
assert isinstance(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ) and (
number >= 0
), "'number' must been an int and positive"
UpperCAmelCase_ : Optional[Any] = True
# 0 and 1 are none primes.
if number <= 1:
UpperCAmelCase_ : Optional[Any] = False
for divisor in range(2, int(round(sqrt(SCREAMING_SNAKE_CASE__ ) ) ) + 1 ):
# if 'number' divisible by 'divisor' then sets 'status'
# of false and break up the loop.
if number % divisor == 0:
UpperCAmelCase_ : List[str] = False
break
# precondition
assert isinstance(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ), "'status' must been from type bool"
return status
def sieve_er(n: int) -> list:
    assert isinstance(n, int) and (n > 2), "'n' must be an int and > 2"

    # begin_list: contains all natural numbers from 2 up to n
    begin_list = list(range(2, n + 1))

    ans = []  # this list will be returned.

    # actual sieve of Eratosthenes
    for i in range(len(begin_list)):
        for j in range(i + 1, len(begin_list)):
            if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
                begin_list[j] = 0

    # filters actual prime numbers.
    ans = [x for x in begin_list if x != 0]

    # precondition
    assert isinstance(ans, list), "'ans' must be from type list"

    return ans
def get_prime_numbers(n: int) -> list:
    assert isinstance(n, int) and (n > 2), "'n' must be an int and > 2"

    ans = []

    # iterates over all numbers between 2 up to n+1
    # if a number is prime then appends it to list 'ans'
    for number in range(2, n + 1):
        if is_prime(number):
            ans.append(number)

    # precondition
    assert isinstance(ans, list), "'ans' must be from type list"

    return ans
def prime_factorization(number: int) -> list:
    assert isinstance(number, int) and number >= 0, "'number' must be an int and >= 0"

    ans = []  # this list will be returned by the function.

    # potential prime number factors.
    factor = 2
    quotient = number

    if number == 0 or number == 1:
        ans.append(number)

    # if 'number' is not prime then builds the prime factorization of 'number'
    elif not is_prime(number):
        while quotient != 1:
            if is_prime(factor) and (quotient % factor == 0):
                ans.append(factor)
                quotient //= factor  # integer division keeps 'quotient' an int
            else:
                factor += 1

    else:
        ans.append(number)

    # precondition
    assert isinstance(ans, list), "'ans' must be from type list"

    return ans
def greatest_prime_factor(number: int) -> int:
    assert isinstance(number, int) and (number >= 0), "'number' must be an int and >= 0"

    ans = 0

    # prime factorization of 'number'
    prime_factors = prime_factorization(number)

    ans = max(prime_factors)

    # precondition
    assert isinstance(ans, int), "'ans' must be from type int"

    return ans


def smallest_prime_factor(number: int) -> int:
    assert isinstance(number, int) and (number >= 0), "'number' must be an int and >= 0"

    ans = 0

    # prime factorization of 'number'
    prime_factors = prime_factorization(number)

    ans = min(prime_factors)

    # precondition
    assert isinstance(ans, int), "'ans' must be from type int"

    return ans
def is_even(number: int) -> bool:
    assert isinstance(number, int), "'number' must be an int"
    assert isinstance(number % 2 == 0, bool), "compare must be from type bool"

    return number % 2 == 0


def is_odd(number: int) -> bool:
    assert isinstance(number, int), "'number' must be an int"
    assert isinstance(number % 2 != 0, bool), "compare must be from type bool"

    return number % 2 != 0
def goldbach(number: int) -> list:
    assert (
        isinstance(number, int) and (number > 2) and is_even(number)
    ), "'number' must be an int, even and > 2"

    ans = []  # this list will be returned

    # creates a list of prime numbers between 2 up to 'number'
    prime_numbers = get_prime_numbers(number)
    len_pn = len(prime_numbers)

    # run variables for the while-loops.
    i = 0
    j = None

    # exit variable, for breaking up the loops
    loop = True

    while i < len_pn and loop:
        j = i + 1

        while j < len_pn and loop:
            if prime_numbers[i] + prime_numbers[j] == number:
                loop = False
                ans.append(prime_numbers[i])
                ans.append(prime_numbers[j])

            j += 1

        i += 1

    # precondition
    assert (
        isinstance(ans, list)
        and (len(ans) == 2)
        and (ans[0] + ans[1] == number)
        and is_prime(ans[0])
        and is_prime(ans[1])
    ), "'ans' must contain two primes whose sum equals 'number'"

    return ans
def gcd(number1: int, number2: int) -> int:
    assert (
        isinstance(number1, int)
        and isinstance(number2, int)
        and (number1 >= 0)
        and (number2 >= 0)
    ), "'number1' and 'number2' must be positive integers."

    rest = 0

    while number2 != 0:
        rest = number1 % number2
        number1 = number2
        number2 = rest

    # precondition
    assert isinstance(number1, int) and (number1 >= 0), "'number' must be from type int and positive"

    return number1
def kg_v(number1: int, number2: int) -> int:
    assert (
        isinstance(number1, int)
        and isinstance(number2, int)
        and (number1 >= 1)
        and (number2 >= 1)
    ), "'number1' and 'number2' must be positive integers."

    ans = 1  # actual answer that will be returned.

    # for kgV(x, 1)
    if number1 > 1 and number2 > 1:
        # builds the prime factorization of 'number1' and 'number2'
        prime_fac_1 = prime_factorization(number1)
        prime_fac_2 = prime_factorization(number2)

    elif number1 == 1 or number2 == 1:
        prime_fac_1 = []
        prime_fac_2 = []
        ans = max(number1, number2)

    count1 = 0
    count2 = 0

    done = []  # captures numbers in both 'prime_fac_1' and 'prime_fac_2'

    # iterates through prime_fac_1
    for n in prime_fac_1:
        if n not in done:
            if n in prime_fac_2:
                count1 = prime_fac_1.count(n)
                count2 = prime_fac_2.count(n)

                for _ in range(max(count1, count2)):
                    ans *= n

            else:
                count1 = prime_fac_1.count(n)

                for _ in range(count1):
                    ans *= n

            done.append(n)

    # iterates through prime_fac_2
    for n in prime_fac_2:
        if n not in done:
            count2 = prime_fac_2.count(n)

            for _ in range(count2):
                ans *= n

            done.append(n)

    # precondition
    assert isinstance(ans, int) and (ans >= 0), "'ans' must be from type int and positive"

    return ans
def get_prime(n: int) -> int:
    assert isinstance(n, int) and (n >= 0), "'n' must be a positive int"

    index = 0
    ans = 2  # this variable holds the answer

    while index < n:
        index += 1

        ans += 1  # counts to the next number

        # if ans is not prime then
        # run to the next prime number.
        while not is_prime(ans):
            ans += 1

    # precondition
    assert isinstance(ans, int) and is_prime(ans), "'ans' must be a prime number and from type int"

    return ans
def get_primes_between(p_number_1: int, p_number_2: int) -> list:
    assert (
        is_prime(p_number_1) and is_prime(p_number_2) and (p_number_1 < p_number_2)
    ), "The arguments must be prime numbers and 'p_number_1' < 'p_number_2'"

    number = p_number_1 + 1  # jump to the next number

    ans = []  # this list will be returned.

    # if number is not prime then
    # fetch the next prime number.
    while not is_prime(number):
        number += 1

    while number < p_number_2:
        ans.append(number)

        number += 1

        # fetch the next prime number.
        while not is_prime(number):
            number += 1

    # precondition
    assert (
        isinstance(ans, list)
        and ans[0] != p_number_1
        and ans[len(ans) - 1] != p_number_2
    ), "'ans' must be a list without the arguments"

    # 'ans' contains neither 'p_number_1' nor 'p_number_2'!
    return ans
def get_divisors(n: int) -> list:
    assert isinstance(n, int) and (n >= 1), "'n' must be int and >= 1"

    ans = []  # will be returned.

    for divisor in range(1, n + 1):
        if n % divisor == 0:
            ans.append(divisor)

    # precondition
    assert ans[0] == 1 and ans[len(ans) - 1] == n, "Error in function get_divisors(...)"

    return ans


def is_perfect_number(number: int) -> bool:
    assert isinstance(number, int) and (number > 1), "'number' must be an int and > 1"

    divisors = get_divisors(number)

    # precondition
    assert (
        isinstance(divisors, list)
        and (divisors[0] == 1)
        and (divisors[len(divisors) - 1] == number)
    ), "Error in help-function get_divisors(...)"

    # summed all divisors up to 'number' (exclusive), hence [:-1]
    return sum(divisors[:-1]) == number
def simplify_fraction(numerator: int, denominator: int) -> tuple:
    assert (
        isinstance(numerator, int)
        and isinstance(denominator, int)
        and (denominator != 0)
    ), "The arguments must be from type int and 'denominator' != 0"

    # build the greatest common divisor of numerator and denominator.
    gcd_of_fraction = gcd(abs(numerator), abs(denominator))

    # precondition
    assert (
        isinstance(gcd_of_fraction, int)
        and (numerator % gcd_of_fraction == 0)
        and (denominator % gcd_of_fraction == 0)
    ), "Error in function gcd(...,...)"

    return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)


def factorial(n: int) -> int:
    assert isinstance(n, int) and (n >= 0), "'n' must be an int and >= 0"

    ans = 1  # this will be returned.

    for factor in range(1, n + 1):
        ans *= factor

    return ans


def fib(n: int) -> int:
    assert isinstance(n, int) and (n >= 0), "'n' must be an int and >= 0"

    tmp = 0
    fib_1 = 1
    ans = 1  # this will be returned

    for _ in range(n - 1):
        tmp = ans
        ans += fib_1
        fib_1 = tmp

    return ans
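
# Quick smoke test (a sketch; the function names are the ones reconstructed above):
#
#   >>> is_prime(97)
#   True
#   >>> goldbach(28)     # an even number > 2 split into two primes
#   [5, 23]
#   >>> kg_v(24, 36)     # least common multiple via prime factorizations: 2**3 * 3**2
#   72
#   >>> fib(5)
#   8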
| 125
|
"""Tests for pushing Flax models to the Hub and loading them from subfolders."""

import tempfile
import unittest

import numpy as np

from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError

from transformers import BertConfig, is_flax_available
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax


if is_flax_available():
    import os

    from flax.core.frozen_dict import unfreeze
    from flax.traverse_util import flatten_dict

    from transformers import FlaxBertModel

    os.environ["XLA_PYTHON_CLIENT_MEM_FRACTION"] = "0.12"  # assumed parallelism: 8


@require_flax
@is_staging_test
class FlaxModelPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-model-flax")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-model-flax-org")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        model = FlaxBertModel(config)
        model.push_to_hub("test-model-flax", use_auth_token=self._token)

        new_model = FlaxBertModel.from_pretrained(f"{USER}/test-model-flax")

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))

        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")

        # Reset repo
        delete_repo(token=self._token, repo_id="test-model-flax")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(tmp_dir, repo_id="test-model-flax", push_to_hub=True, use_auth_token=self._token)

        new_model = FlaxBertModel.from_pretrained(f"{USER}/test-model-flax")

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))

        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")

    def test_push_to_hub_in_organization(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        model = FlaxBertModel(config)
        model.push_to_hub("valid_org/test-model-flax-org", use_auth_token=self._token)

        new_model = FlaxBertModel.from_pretrained("valid_org/test-model-flax-org")

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))

        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-model-flax-org")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(
                tmp_dir, repo_id="valid_org/test-model-flax-org", push_to_hub=True, use_auth_token=self._token
            )

        new_model = FlaxBertModel.from_pretrained("valid_org/test-model-flax-org")

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))

        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")


def check_models_equal(model1, model2):
    models_are_equal = True
    flat_params_1 = flatten_dict(model1.params)
    flat_params_2 = flatten_dict(model2.params)
    for key in flat_params_1.keys():
        if np.sum(np.abs(flat_params_1[key] - flat_params_2[key])) > 1e-4:
            models_are_equal = False

    return models_are_equal


@require_flax
class FlaxModelUtilsTest(unittest.TestCase):
    def test_model_from_pretrained_subfolder(self):
        config = BertConfig.from_pretrained("hf-internal-testing/tiny-bert-flax-only")
        model = FlaxBertModel(config)

        subfolder = "bert"
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(os.path.join(tmp_dir, subfolder))

            with self.assertRaises(OSError):
                _ = FlaxBertModel.from_pretrained(tmp_dir)

            model_loaded = FlaxBertModel.from_pretrained(tmp_dir, subfolder=subfolder)

        self.assertTrue(check_models_equal(model, model_loaded))

    def test_model_from_pretrained_subfolder_sharded(self):
        config = BertConfig.from_pretrained("hf-internal-testing/tiny-bert-flax-only")
        model = FlaxBertModel(config)

        subfolder = "bert"
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(os.path.join(tmp_dir, subfolder), max_shard_size="10KB")

            with self.assertRaises(OSError):
                _ = FlaxBertModel.from_pretrained(tmp_dir)

            model_loaded = FlaxBertModel.from_pretrained(tmp_dir, subfolder=subfolder)

        self.assertTrue(check_models_equal(model, model_loaded))

    def test_model_from_pretrained_hub_subfolder(self):
        subfolder = "bert"
        model_id = "hf-internal-testing/tiny-random-bert-subfolder"
        with self.assertRaises(OSError):
            _ = FlaxBertModel.from_pretrained(model_id)

        model = FlaxBertModel.from_pretrained(model_id, subfolder=subfolder)

        self.assertIsNotNone(model)

    def test_model_from_pretrained_hub_subfolder_sharded(self):
        subfolder = "bert"
        model_id = "hf-internal-testing/tiny-random-bert-sharded-subfolder"
        with self.assertRaises(OSError):
            _ = FlaxBertModel.from_pretrained(model_id)

        model = FlaxBertModel.from_pretrained(model_id, subfolder=subfolder)

        self.assertIsNotNone(model)
| 125
| 1
|
from __future__ import annotations
from typing import Any
def evaluate_postfix(postfix_notation: list) -> int:
    """Evaluate a postfix (reverse Polish) expression given as a list of tokens."""
    if not postfix_notation:
        return 0

    operations = {"+", "-", "*", "/"}
    stack: list[Any] = []

    for token in postfix_notation:
        if token in operations:
            b, a = stack.pop(), stack.pop()
            if token == "+":
                stack.append(a + b)
            elif token == "-":
                stack.append(a - b)
            elif token == "*":
                stack.append(a * b)
            else:
                # division truncated toward zero, like C-style integer division
                if a * b < 0 and a % b != 0:
                    stack.append(a // b + 1)
                else:
                    stack.append(a // b)
        else:
            stack.append(int(token))

    return stack.pop()
if __name__ == "__main__":
import doctest
doctest.testmod()
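
    # Example runs (sketch, using the reconstructed name `evaluate_postfix`):
    # "2 1 + 3 *" evaluates to (2 + 1) * 3 = 9; "/" truncates toward zero.
    print(evaluate_postfix(["2", "1", "+", "3", "*"]))  # 9
    print(evaluate_postfix(["-7", "2", "/"]))  # -3, truncation toward zero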
| 366
|
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.activations import gelu_new, gelu_python, get_activation
@require_torch
class TestActivations(unittest.TestCase):
    def test_gelu_versions(self):
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
        torch_builtin = get_activation("gelu")
        self.assertTrue(torch.allclose(gelu_python(x), torch_builtin(x)))
        self.assertFalse(torch.allclose(gelu_python(x), gelu_new(x)))

    def test_gelu_10(self):
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
        torch_builtin = get_activation("gelu")
        gelu10 = get_activation("gelu_10")

        y_gelu = torch_builtin(x)
        y_gelu_10 = gelu10(x)

        clipped_mask = torch.where(y_gelu_10 < 10.0, 1, 0)

        self.assertTrue(torch.max(y_gelu_10).item() == 10.0)
        self.assertTrue(torch.allclose(y_gelu * clipped_mask, y_gelu_10 * clipped_mask))

    def test_get_activation(self):
        get_activation("gelu")
        get_activation("gelu_10")
        get_activation("gelu_fast")
        get_activation("gelu_new")
        get_activation("gelu_python")
        get_activation("gelu_pytorch_tanh")
        get_activation("linear")
        get_activation("mish")
        get_activation("quick_gelu")
        get_activation("relu")
        get_activation("sigmoid")
        get_activation("silu")
        get_activation("swish")
        get_activation("tanh")
        with self.assertRaises(KeyError):
            get_activation("bogus")
        with self.assertRaises(KeyError):
            get_activation(None)

    def test_activations_are_distinct_objects(self):
        act1 = get_activation("gelu")
        act1.a = 1
        act2 = get_activation("gelu")
        self.assertEqual(act1.a, 1)
        with self.assertRaises(AttributeError):
            _ = act2.a
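
# Behavior sketch (standalone, mirrors test_gelu_10 above): "gelu_10" acts like GELU
# but clips activations at 10, so large positive inputs saturate.
#
#   act = get_activation("gelu_10")
#   act(torch.tensor([100.0])).item()  # -> 10.0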
| 35
| 0
|
import unittest
from transformers import BigBirdConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
from transformers.models.big_bird.modeling_flax_big_bird import (
FlaxBigBirdForCausalLM,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForPreTraining,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
FlaxBigBirdModel,
)
class FlaxBigBirdModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=2,
        seq_length=56,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=2,
        intermediate_size=7,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
        attention_type="block_sparse",
        use_bias=True,
        rescale_embeddings=False,
        block_size=2,
        num_random_blocks=3,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
        self.rescale_embeddings = rescale_embeddings
        self.attention_type = attention_type
        self.use_bias = use_bias
        self.block_size = block_size
        self.num_random_blocks = num_random_blocks

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = BigBirdConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
            attention_type=self.attention_type,
            block_size=self.block_size,
            num_random_blocks=self.num_random_blocks,
            use_bias=self.use_bias,
            rescale_embeddings=self.rescale_embeddings,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "attention_mask": attention_mask,
        }
        return config, inputs_dict
@require_flax
class FlaxBigBirdModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaxBigBirdForCausalLM,
            FlaxBigBirdModel,
            FlaxBigBirdForPreTraining,
            FlaxBigBirdForMaskedLM,
            FlaxBigBirdForMultipleChoice,
            FlaxBigBirdForQuestionAnswering,
            FlaxBigBirdForSequenceClassification,
            FlaxBigBirdForTokenClassification,
        )
        if is_flax_available()
        else ()
    )

    test_attn_probs = False
    test_mismatched_shapes = False

    def setUp(self):
        self.model_tester = FlaxBigBirdModelTester(self)

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_from_pretrained_save_pretrained(self):
        super().test_from_pretrained_save_pretrained()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_from_pretrained_with_no_automatic_init(self):
        super().test_from_pretrained_with_no_automatic_init()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_no_automatic_init(self):
        super().test_no_automatic_init()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_hidden_states_output(self):
        super().test_hidden_states_output()

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("google/bigbird-roberta-base")
            self.assertIsNotNone(model)

    def test_attention_outputs(self):
        if self.test_attn_probs:
            super().test_attention_outputs()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(input_ids, attention_mask=None, **kwargs):
                    return model(input_ids=input_ids, attention_mask=attention_mask, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    def check_pt_flax_outputs(self, fx_outputs, pt_outputs, model_class, tol=1e-5, name="outputs", attributes=None):
        # `bigbird_block_sparse_attention` in `FlaxBigBird` returns `attention_probs = None`, while in PyTorch version,
        # an effort was done to return `attention_probs` (yet to be verified).
        if name.startswith("outputs.attentions"):
            return
        else:
            super().check_pt_flax_outputs(fx_outputs, pt_outputs, model_class, tol, name, attributes)
| 326
|
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def convert_luke_checkpoint(checkpoint_path, metadata_path, entity_vocab_path, pytorch_dump_folder_path, model_size):
    # Load configuration defined in the metadata file
    with open(metadata_path) as metadata_file:
        metadata = json.load(metadata_file)
    config = LukeConfig(use_entity_aware_attention=True, **metadata["model_config"])

    # Load in the weights from the checkpoint_path
    state_dict = torch.load(checkpoint_path, map_location="cpu")["module"]

    # Load the entity vocab file
    entity_vocab = load_original_entity_vocab(entity_vocab_path)
    # add an entry for [MASK2]
    entity_vocab["[MASK2]"] = max(entity_vocab.values()) + 1
    config.entity_vocab_size += 1

    tokenizer = XLMRobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"])

    # Add special tokens to the token vocabulary for downstream tasks
    entity_token_1 = AddedToken("<ent>", lstrip=False, rstrip=False)
    entity_token_2 = AddedToken("<ent2>", lstrip=False, rstrip=False)
    tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_1, entity_token_2]})
    config.vocab_size += 2

    print(f"Saving tokenizer to {pytorch_dump_folder_path}")
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    with open(os.path.join(pytorch_dump_folder_path, "tokenizer_config.json"), "r") as f:
        tokenizer_config = json.load(f)
    tokenizer_config["tokenizer_class"] = "MLukeTokenizer"
    with open(os.path.join(pytorch_dump_folder_path, "tokenizer_config.json"), "w") as f:
        json.dump(tokenizer_config, f)

    with open(os.path.join(pytorch_dump_folder_path, MLukeTokenizer.vocab_files_names["entity_vocab_file"]), "w") as f:
        json.dump(entity_vocab, f)

    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path)

    # Initialize the embeddings of the special tokens
    ent_init_index = tokenizer.convert_tokens_to_ids(["@"])[0]
    ent2_init_index = tokenizer.convert_tokens_to_ids(["#"])[0]

    word_emb = state_dict["embeddings.word_embeddings.weight"]
    ent_emb = word_emb[ent_init_index].unsqueeze(0)
    ent2_emb = word_emb[ent2_init_index].unsqueeze(0)
    state_dict["embeddings.word_embeddings.weight"] = torch.cat([word_emb, ent_emb, ent2_emb])
    # add special tokens for 'entity_predictions.bias'
    for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
        decoder_bias = state_dict[bias_name]
        ent_decoder_bias = decoder_bias[ent_init_index].unsqueeze(0)
        ent2_decoder_bias = decoder_bias[ent2_init_index].unsqueeze(0)
        state_dict[bias_name] = torch.cat([decoder_bias, ent_decoder_bias, ent2_decoder_bias])

    # Initialize the query layers of the entity-aware self-attention mechanism
    for layer_index in range(config.num_hidden_layers):
        for matrix_name in ["query.weight", "query.bias"]:
            prefix = f"encoder.layer.{layer_index}.attention.self."
            state_dict[prefix + "w2e_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2w_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2e_" + matrix_name] = state_dict[prefix + matrix_name]

    # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
    entity_emb = state_dict["entity_embeddings.entity_embeddings.weight"]
    entity_mask_emb = entity_emb[entity_vocab["[MASK]"]].unsqueeze(0)
    state_dict["entity_embeddings.entity_embeddings.weight"] = torch.cat([entity_emb, entity_mask_emb])
    # add [MASK2] for 'entity_predictions.bias'
    entity_prediction_bias = state_dict["entity_predictions.bias"]
    entity_mask_bias = entity_prediction_bias[entity_vocab["[MASK]"]].unsqueeze(0)
    state_dict["entity_predictions.bias"] = torch.cat([entity_prediction_bias, entity_mask_bias])

    model = LukeForMaskedLM(config=config).eval()

    state_dict.pop("entity_predictions.decoder.weight")
    state_dict.pop("lm_head.decoder.weight")
    state_dict.pop("lm_head.decoder.bias")
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if not (key.startswith("lm_head") or key.startswith("entity_predictions")):
            new_state_dict[f"luke.{key}"] = state_dict[key]
        else:
            new_state_dict[key] = state_dict[key]

    missing_keys, unexpected_keys = model.load_state_dict(new_state_dict, strict=False)

    if set(unexpected_keys) != {"luke.embeddings.position_ids"}:
        raise ValueError(f"Unexpected unexpected_keys: {unexpected_keys}")
    if set(missing_keys) != {
        "lm_head.decoder.weight",
        "lm_head.decoder.bias",
        "entity_predictions.decoder.weight",
    }:
        raise ValueError(f"Unexpected missing_keys: {missing_keys}")

    model.tie_weights()
    assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
    assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()

    # Check outputs
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path, task="entity_classification")

    text = "ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan)."
    span = (0, 9)
    encoding = tokenizer(text, entity_spans=[span], return_tensors="pt")

    outputs = model(**encoding)

    # Verify word hidden states
    if model_size == "large":
        raise NotImplementedError
    else:  # base
        expected_shape = torch.Size((1, 33, 768))
        expected_slice = torch.tensor([[0.0892, 0.0596, -0.2819], [0.0134, 0.1199, 0.0573], [-0.0169, 0.0927, 0.0644]])

    if not (outputs.last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}"
        )
    if not torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Verify entity hidden states
    if model_size == "large":
        raise NotImplementedError
    else:  # base
        expected_shape = torch.Size((1, 1, 768))
        expected_slice = torch.tensor([[-0.1482, 0.0609, 0.0322]])

    if not (outputs.entity_last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"
            f" {expected_shape}"
        )
    if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Verify masked word/entity prediction
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path)
    text = "Tokyo is the capital of <mask>."
    span = (24, 30)
    encoding = tokenizer(text, entity_spans=[span], return_tensors="pt")

    outputs = model(**encoding)

    input_ids = encoding["input_ids"][0].tolist()
    mask_position_id = input_ids.index(tokenizer.convert_tokens_to_ids("<mask>"))
    predicted_id = outputs.logits[0][mask_position_id].argmax(dim=-1)
    assert "Japan" == tokenizer.decode(predicted_id)

    predicted_entity_id = outputs.entity_logits[0][0].argmax().item()
    multilingual_predicted_entities = [
        entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
    ]
    assert [e for e in multilingual_predicted_entities if e.startswith("en:")][0] == "en:Japan"

    # Finally, save our PyTorch model and tokenizer
    print("Saving PyTorch model to {}".format(pytorch_dump_folder_path))
    model.save_pretrained(pytorch_dump_folder_path)


def load_original_entity_vocab(entity_vocab_path):
    SPECIAL_TOKENS = ["[MASK]", "[PAD]", "[UNK]"]

    data = [json.loads(line) for line in open(entity_vocab_path)]

    new_mapping = {}
    for entry in data:
        entity_id = entry["id"]
        for entity_name, language in entry["entities"]:
            if entity_name in SPECIAL_TOKENS:
                new_mapping[entity_name] = entity_id
                break
            new_mapping[f"{language}:{entity_name}"] = entity_id
    return new_mapping
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--checkpoint_path''', type=str, help='''Path to a pytorch_model.bin file.''')
parser.add_argument(
'''--metadata_path''', default=None, type=str, help='''Path to a metadata.json file, defining the configuration.'''
)
parser.add_argument(
'''--entity_vocab_path''',
default=None,
type=str,
help='''Path to an entity_vocab.tsv file, containing the entity vocabulary.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to where to dump the output PyTorch model.'''
)
parser.add_argument(
'''--model_size''', default='''base''', type=str, choices=['''base''', '''large'''], help='''Size of the model to be converted.'''
)
    args = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
| 326
| 1
|
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters(model):
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum([np.prod(p.size()) for p in model_parameters])
    return params


logger = logging.getLogger(__name__)


def get_checkpoint_callback(output_dir, metric):
    """Saves the best model by validation metric score."""
    if metric == "rouge2":
        exp = "{val_avg_rouge2:.4f}-{step_count}"
    elif metric == "bleu":
        exp = "{val_avg_bleu:.4f}-{step_count}"
    elif metric == "em":
        exp = "{val_avg_em:.4f}-{step_count}"
    else:
        raise NotImplementedError(
            f"seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"
            " function."
        )

    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir,
        filename=exp,
        monitor=f"val_{metric}",
        mode="max",
        save_top_k=3,
        every_n_epochs=1,
    )
    return checkpoint_callback


def get_early_stopping_callback(metric, patience):
    return EarlyStopping(
        monitor=f"val_{metric}",
        mode="min" if "loss" in metric else "max",
        patience=patience,
        verbose=True,
    )


class Seq2SeqLoggingCallback(pl.Callback):
    def on_batch_end(self, trainer, pl_module):
        lrs = {f"lr_group_{i}": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
        pl_module.logger.log_metrics(lrs)

    @rank_zero_only
    def _write_logs(self, trainer, pl_module, type_path, save_generations=True) -> None:
        logger.info(f"***** {type_path} results at step {trainer.global_step:05d} *****")
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]})
        # Log results
        od = Path(pl_module.hparams.output_dir)
        if type_path == "test":
            results_file = od / "test_results.txt"
            generations_file = od / "test_generations.txt"
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / f"{type_path}_results/{trainer.global_step:05d}.txt"
            generations_file = od / f"{type_path}_generations/{trainer.global_step:05d}.txt"
            results_file.parent.mkdir(exist_ok=True)
            generations_file.parent.mkdir(exist_ok=True)
        with open(results_file, "a+") as writer:
            for key in sorted(metrics):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val, torch.Tensor):
                    val = val.item()
                msg = f"{key}: {val:.6f}\n"
                writer.write(msg)

        if not save_generations:
            return

        if "preds" in metrics:
            content = "\n".join(metrics["preds"])
            generations_file.open("w+").write(content)

    @rank_zero_only
    def on_train_start(self, trainer, pl_module):
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()

        n_trainable_pars = count_trainable_parameters(pl_module)
        # mp stands for million parameters
        trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6})

    @rank_zero_only
    def on_test_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        return self._write_logs(trainer, pl_module, "test")

    @rank_zero_only
    def on_validation_end(self, trainer: pl.Trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        # Uncommenting this will save val generations
        # return self._write_logs(trainer, pl_module, "valid")
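
# Wiring sketch (hypothetical `output_dir`; assumes a LightningModule exposing the
# attributes used above, e.g. `metrics` and `metrics_save_path`):
#
#   trainer = pl.Trainer(
#       callbacks=[
#           Seq2SeqLoggingCallback(),
#           get_checkpoint_callback(output_dir, metric="rouge2"),
#           get_early_stopping_callback(metric="rouge2", patience=3),
#       ]
#   )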
| 353
|
from transformers import BertTokenizer, EncoderDecoderModel, Seq2SeqTrainer, Seq2SeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available


if is_datasets_available():
    import datasets


class Seq2seqTrainerTester(TestCasePlus):
    @slow
    @require_torch
    def test_finetune_bert2bert(self):
        bert2bert = EncoderDecoderModel.from_encoder_decoder_pretrained("prajjwal1/bert-tiny", "prajjwal1/bert-tiny")
        tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")

        bert2bert.config.vocab_size = bert2bert.config.encoder.vocab_size
        bert2bert.config.eos_token_id = tokenizer.sep_token_id
        bert2bert.config.decoder_start_token_id = tokenizer.cls_token_id
        bert2bert.config.max_length = 128

        train_dataset = datasets.load_dataset("cnn_dailymail", "3.0.0", split="train[:1%]")
        val_dataset = datasets.load_dataset("cnn_dailymail", "3.0.0", split="validation[:1%]")

        train_dataset = train_dataset.select(range(32))
        val_dataset = val_dataset.select(range(16))

        batch_size = 4

        def _map_to_encoder_decoder_inputs(batch):
            # Tokenizer will automatically set [BOS] <text> [EOS]
            inputs = tokenizer(batch["article"], padding="max_length", truncation=True, max_length=512)
            outputs = tokenizer(batch["highlights"], padding="max_length", truncation=True, max_length=128)
            batch["input_ids"] = inputs.input_ids
            batch["attention_mask"] = inputs.attention_mask

            batch["decoder_input_ids"] = outputs.input_ids
            batch["labels"] = outputs.input_ids.copy()
            batch["labels"] = [
                [-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["labels"]
            ]
            batch["decoder_attention_mask"] = outputs.attention_mask

            assert all(len(x) == 512 for x in inputs.input_ids)
            assert all(len(x) == 128 for x in outputs.input_ids)

            return batch

        def _compute_metrics(pred):
            labels_ids = pred.label_ids
            pred_ids = pred.predictions

            # all unnecessary tokens are removed
            pred_str = tokenizer.batch_decode(pred_ids, skip_special_tokens=True)
            label_str = tokenizer.batch_decode(labels_ids, skip_special_tokens=True)

            accuracy = sum([int(pred_str[i] == label_str[i]) for i in range(len(pred_str))]) / len(pred_str)

            return {"accuracy": accuracy}

        # map train dataset
        train_dataset = train_dataset.map(
            _map_to_encoder_decoder_inputs,
            batched=True,
            batch_size=batch_size,
            remove_columns=["article", "highlights"],
        )
        train_dataset.set_format(
            type="torch",
            columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"],
        )

        # same for validation dataset
        val_dataset = val_dataset.map(
            _map_to_encoder_decoder_inputs,
            batched=True,
            batch_size=batch_size,
            remove_columns=["article", "highlights"],
        )
        val_dataset.set_format(
            type="torch",
            columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"],
        )

        output_dir = self.get_auto_remove_tmp_dir()

        training_args = Seq2SeqTrainingArguments(
            output_dir=output_dir,
            per_device_train_batch_size=batch_size,
            per_device_eval_batch_size=batch_size,
            predict_with_generate=True,
            evaluation_strategy="steps",
            do_train=True,
            do_eval=True,
            warmup_steps=0,
            eval_steps=2,
            logging_steps=2,
        )

        # instantiate trainer
        trainer = Seq2SeqTrainer(
            model=bert2bert,
            args=training_args,
            compute_metrics=_compute_metrics,
            train_dataset=train_dataset,
            eval_dataset=val_dataset,
            tokenizer=tokenizer,
        )

        # start training
        trainer.train()
| 33
| 0
|
def solution(n: int = 100) -> int:
    """
    Returns the difference between the square of the sum and the sum of the
    squares of the first `n` natural numbers, using the closed forms
    sum(i) = n(n+1)/2 and sum(i^2) = n(n+1)(2n+1)/6.
    """
    square_of_sum = (n * (n + 1) // 2) ** 2
    sum_of_squares = n * (n + 1) * (2 * n + 1) // 6
    return square_of_sum - sum_of_squares
if __name__ == "__main__":
print(F'{solution() = }')
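    # Worked check for n = 10: square of sum = 55**2 = 3025, sum of squares = 385,
    # difference = 2640, the example value quoted in the Project Euler problem statement.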
| 273
|
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class XLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_xlm_roberta_base(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-base")
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        # The dog is cute and lives in the garden house

        expected_output_shape = torch.Size((1, 12, 768))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))

    @slow
    def test_xlm_roberta_large(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-large")
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        # The dog is cute and lives in the garden house

        expected_output_shape = torch.Size((1, 12, 1024))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0699, -0.0318, 0.0705, -0.1241, 0.0999, -0.0520, 0.1004, -0.1838, -0.4704, 0.1437, 0.0821, 0.0126]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
| 273
| 1
|
"""simple docstring"""
def __lowerCamelCase ( a_ : Any , a_ : int , a_ : Tuple , a_ : Any ) -> int:
global f # a global dp table for knapsack
if f[i][j] < 0:
if j < wt[i - 1]:
__SCREAMING_SNAKE_CASE :Union[str, Any] = mf_knapsack(i - 1 , a_ , a_ , a_ )
else:
__SCREAMING_SNAKE_CASE :Tuple = max(
mf_knapsack(i - 1 , a_ , a_ , a_ ) , mf_knapsack(i - 1 , a_ , a_ , j - wt[i - 1] ) + val[i - 1] , )
__SCREAMING_SNAKE_CASE :List[str] = val
return f[i][j]
def __lowerCamelCase ( a_ : Optional[Any] , a_ : str , a_ : Optional[Any] , a_ : str ) -> Optional[int]:
__SCREAMING_SNAKE_CASE :int = [[0] * (w + 1) for _ in range(n + 1 )]
for i in range(1 , n + 1 ):
for w_ in range(1 , w + 1 ):
if wt[i - 1] <= w_:
__SCREAMING_SNAKE_CASE :Optional[Any] = max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]] , dp[i - 1][w_] )
else:
__SCREAMING_SNAKE_CASE :Union[str, Any] = dp[i - 1][w_]
return dp[n][w_], dp
def __lowerCamelCase ( a_ : int , a_ : list , a_ : list ) -> Dict:
if not (isinstance(a_ , (list, tuple) ) and isinstance(a_ , (list, tuple) )):
raise ValueError(
'''Both the weights and values vectors must be either lists or tuples''' )
__SCREAMING_SNAKE_CASE :Dict = len(a_ )
if num_items != len(a_ ):
__SCREAMING_SNAKE_CASE :List[str] = (
'''The number of weights must be the same as the number of values.\n'''
f'''But got {num_items} weights and {len(a_ )} values'''
)
raise ValueError(a_ )
for i in range(a_ ):
if not isinstance(wt[i] , a_ ):
__SCREAMING_SNAKE_CASE :Tuple = (
'''All weights must be integers but got weight of '''
f'''type {type(wt[i] )} at index {i}'''
)
raise TypeError(a_ )
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE :Dict = knapsack(a_ , a_ , a_ , a_ )
__SCREAMING_SNAKE_CASE :set = set()
_construct_solution(a_ , a_ , a_ , a_ , a_ )
return optimal_val, example_optional_set
def __lowerCamelCase ( a_ : list , a_ : list , a_ : int , a_ : int , a_ : set ) -> Any:
# for the current item i at a maximum weight j to be part of an optimal subset,
# the optimal value at (i, j) must be greater than the optimal value at (i-1, j).
# where i - 1 means considering only the previous items at the given maximum weight
if i > 0 and j > 0:
if dp[i - 1][j] == dp[i][j]:
_construct_solution(a_ , a_ , i - 1 , a_ , a_ )
else:
optimal_set.add(a_ )
_construct_solution(a_ , a_ , i - 1 , j - wt[i - 1] , a_ )
if __name__ == "__main__":
lowerCamelCase_ = [3, 2, 4, 4]
lowerCamelCase_ = [4, 3, 2, 3]
lowerCamelCase_ = 4
lowerCamelCase_ = 6
lowerCamelCase_ = [[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)]
lowerCamelCase_ , lowerCamelCase_ = knapsack(w, wt, val, n)
print(optimal_solution)
print(mf_knapsack(n, wt, val, w)) # switched the n and w
# testing the dynamic programming problem with example
# the optimal subset for the above example are items 3 and 4
lowerCamelCase_ , lowerCamelCase_ = knapsack_with_example_solution(w, wt, val)
assert optimal_solution == 8
assert optimal_subset == {3, 4}
print("optimal_value = ", optimal_solution)
print("An optimal subset corresponding to the optimal value", optimal_subset)
| 239
|
"""simple docstring"""
from typing import Callable, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase_ = logging.get_logger(__name__)
lowerCamelCase_ = {
"microsoft/xprophetnet-large-wiki100-cased": (
"https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/config.json"
),
}
class _SCREAMING_SNAKE_CASE( A ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = '''xlm-prophetnet'''
SCREAMING_SNAKE_CASE_ : Any = ['''past_key_values''']
SCREAMING_SNAKE_CASE_ : Optional[Any] = {
'''num_attention_heads''': '''num_encoder_attention_heads''',
}
def __init__( self ,SCREAMING_SNAKE_CASE__ = 0.1 ,SCREAMING_SNAKE_CASE__ = "gelu" ,SCREAMING_SNAKE_CASE__ = 3_05_22 ,SCREAMING_SNAKE_CASE__ = 10_24 ,SCREAMING_SNAKE_CASE__ = 40_96 ,SCREAMING_SNAKE_CASE__ = 12 ,SCREAMING_SNAKE_CASE__ = 16 ,SCREAMING_SNAKE_CASE__ = 40_96 ,SCREAMING_SNAKE_CASE__ = 12 ,SCREAMING_SNAKE_CASE__ = 16 ,SCREAMING_SNAKE_CASE__ = 0.1 ,SCREAMING_SNAKE_CASE__ = 0.1 ,SCREAMING_SNAKE_CASE__ = 5_12 ,SCREAMING_SNAKE_CASE__ = 0.0_2 ,SCREAMING_SNAKE_CASE__ = True ,SCREAMING_SNAKE_CASE__ = True ,SCREAMING_SNAKE_CASE__ = 0 ,SCREAMING_SNAKE_CASE__ = 2 ,SCREAMING_SNAKE_CASE__ = 32 ,SCREAMING_SNAKE_CASE__ = 1_28 ,SCREAMING_SNAKE_CASE__ = False ,SCREAMING_SNAKE_CASE__ = 0.0 ,SCREAMING_SNAKE_CASE__ = True ,SCREAMING_SNAKE_CASE__ = 0 ,SCREAMING_SNAKE_CASE__ = 1 ,SCREAMING_SNAKE_CASE__ = 2 ,**SCREAMING_SNAKE_CASE__ ,) -> Dict:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :int = vocab_size
__SCREAMING_SNAKE_CASE :Tuple = hidden_size
__SCREAMING_SNAKE_CASE :Optional[int] = encoder_ffn_dim
__SCREAMING_SNAKE_CASE :Optional[int] = num_encoder_layers
__SCREAMING_SNAKE_CASE :Tuple = num_encoder_attention_heads
__SCREAMING_SNAKE_CASE :List[Any] = decoder_ffn_dim
__SCREAMING_SNAKE_CASE :Union[str, Any] = num_decoder_layers
__SCREAMING_SNAKE_CASE :Optional[Any] = num_decoder_attention_heads
__SCREAMING_SNAKE_CASE :List[str] = max_position_embeddings
__SCREAMING_SNAKE_CASE :Dict = init_std # Normal(0, this parameter)
__SCREAMING_SNAKE_CASE :List[Any] = activation_function
# parameters for xlmprophetnet
__SCREAMING_SNAKE_CASE :Tuple = ngram
__SCREAMING_SNAKE_CASE :int = num_buckets
__SCREAMING_SNAKE_CASE :Optional[int] = relative_max_distance
__SCREAMING_SNAKE_CASE :Union[str, Any] = disable_ngram_loss
__SCREAMING_SNAKE_CASE :Dict = eps
# 3 Types of Dropout
__SCREAMING_SNAKE_CASE :List[str] = attention_dropout
__SCREAMING_SNAKE_CASE :Dict = activation_dropout
__SCREAMING_SNAKE_CASE :Union[str, Any] = dropout
__SCREAMING_SNAKE_CASE :int = use_cache
super().__init__(
pad_token_id=SCREAMING_SNAKE_CASE__ ,bos_token_id=SCREAMING_SNAKE_CASE__ ,eos_token_id=SCREAMING_SNAKE_CASE__ ,is_encoder_decoder=SCREAMING_SNAKE_CASE__ ,add_cross_attention=SCREAMING_SNAKE_CASE__ ,decoder_start_token_id=SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__ ,)
@property
def _UpperCamelCase ( self ) -> int:
"""simple docstring"""
return self.num_encoder_layers + self.num_decoder_layers
@num_hidden_layers.setter
def _UpperCamelCase ( self ,SCREAMING_SNAKE_CASE__ ) -> Optional[int]:
"""simple docstring"""
raise NotImplementedError(
'''This model does not support the setting of `num_hidden_layers`. Please set `num_encoder_layers` and'''
''' `num_decoder_layers`.''' )
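
# Usage sketch: with the defaults above, `num_hidden_layers` is derived rather than stored.
#
#   config = XLMProphetNetConfig()
#   config.num_hidden_layers  # 24 (12 encoder + 12 decoder layers)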
| 239
| 1
|
"""simple docstring"""
from collections import namedtuple
__UpperCamelCase : Optional[int] = namedtuple('''from_to''', '''from_ to''')
__UpperCamelCase : List[Any] = {
'''cubicmeter''': from_to(1, 1),
'''litre''': from_to(0.0_0_1, 1_0_0_0),
'''kilolitre''': from_to(1, 1),
'''gallon''': from_to(0.0_0_4_5_4, 2_6_4.1_7_2),
'''cubicyard''': from_to(0.7_6_4_5_5, 1.3_0_7_9_5),
'''cubicfoot''': from_to(0.0_2_8, 3_5.3_1_4_7),
'''cup''': from_to(0.0_0_0_2_3_6_5_8_8, 4_2_2_6.7_5),
}
def __SCREAMING_SNAKE_CASE ( A_ , A_ , A_ ):
if from_type not in METRIC_CONVERSION:
raise ValueError(
f'Invalid \'from_type\' value: {from_type!r} Supported values are:\n'
+ ''', '''.join(A_ ) )
if to_type not in METRIC_CONVERSION:
raise ValueError(
f'Invalid \'to_type\' value: {to_type!r}. Supported values are:\n'
+ ''', '''.join(A_ ) )
return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to
if __name__ == "__main__":
import doctest
doctest.testmod()
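    # Worked example: 4 cubic metres to litres = 4 * 1 * 1000 = 4000.
    print(volume_conversion(4, "cubicmeter", "litre"))  # 4000.0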
| 106
|
"""simple docstring"""
def __SCREAMING_SNAKE_CASE ( A_ , A_ ):
return numa ^ numa < 0
if __name__ == "__main__":
import doctest
doctest.testmod()
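    # Examples: the XOR of two ints is negative exactly when their sign bits differ.
    print(different_signs(1, -1))  # True
    print(different_signs(3, 7))   # False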
| 106
| 1
|
def min_path_sum(grid: list) -> int:
    """
    Find the minimum cost of a path from the top left to the bottom right of a
    grid, moving only right or down.
    """
    if not grid or not grid[0]:
        raise TypeError("The grid does not contain the appropriate information")

    for cell_n in range(1, len(grid[0])):
        grid[0][cell_n] += grid[0][cell_n - 1]
    row_above = grid[0]

    for row_n in range(1, len(grid)):
        current_row = grid[row_n]
        grid[row_n] = fill_row(current_row, row_above)
        row_above = grid[row_n]

    return grid[-1][-1]


def fill_row(current_row: list, row_above: list) -> list:
    current_row[0] += row_above[0]
    for cell_n in range(1, len(current_row)):
        current_row[cell_n] += min(current_row[cell_n - 1], row_above[cell_n])

    return current_row
if __name__ == "__main__":
import doctest
doctest.testmod()
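    # Worked example: in [[1, 3, 1], [1, 5, 1], [4, 2, 1]] the cheapest path is
    # 1 -> 3 -> 1 -> 1 -> 1 = 7 (right, right, down, down).
    print(min_path_sum([[1, 3, 1], [1, 5, 1], [4, 2, 1]]))  # 7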
| 352
|
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def get_dataset():
    data_dict = {
        "repo_name": ["test_repo1", "test_repo2", "test_repo3"],
        "path": ["test_1.py", "test_2.py", "unit_test.py"],
        "content": ["a " * 20, "a " * 30, "b " * 7],
    }
    dataset = Dataset.from_dict(data_dict)
    return dataset


class MakeDuplicateClustersTest(TestCase):
    def test_make_duplicate_clusters(self):
        ds = get_dataset()
        duplicate_clusters = make_duplicate_clusters(ds, 0.85)
        self.assertEqual(len(duplicate_clusters[0]), 2)

    def test_deduplicate_dataset(self):
        ds = get_dataset()
        ds_filter, duplicate_clusters = deduplicate_dataset(ds)
        self.assertEqual(len(ds_filter), 2)
        print(duplicate_clusters)
        self.assertEqual(duplicate_clusters[0][0]["copies"], 2)
        self.assertEqual(duplicate_clusters[0][0]["is_extreme"], True)
| 247
| 0
|
"""simple docstring"""
import argparse
import glob
import logging
import os
from argparse import Namespace
from importlib import import_module
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from seqeval.metrics import accuracy_score, f1_score, precision_score, recall_score
from torch.nn import CrossEntropyLoss
from torch.utils.data import DataLoader, TensorDataset
from utils_ner import TokenClassificationTask
logger = logging.getLogger(__name__)


class NERTransformer(BaseTransformer):
    """A training module for NER; see BaseTransformer for the core options."""

    mode = "token-classification"

    def __init__(self, hparams):
        if type(hparams) == dict:
            hparams = Namespace(**hparams)
        module = import_module("tasks")
        try:
            token_classification_task_clazz = getattr(module, hparams.task_type)
            self.token_classification_task: TokenClassificationTask = token_classification_task_clazz()
        except AttributeError:
            raise ValueError(
                f"Task {hparams.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. "
                f"Available tasks classes are: {TokenClassificationTask.__subclasses__()}"
            )
        self.labels = self.token_classification_task.get_labels(hparams.labels)
        self.pad_token_label_id = CrossEntropyLoss().ignore_index
        super().__init__(hparams, len(self.labels), self.mode)

    def forward(self, **inputs):
        return self.model(**inputs)

    def training_step(self, batch, batch_num):
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
        if self.config.model_type != "distilbert":
            inputs["token_type_ids"] = (
                batch[2] if self.config.model_type in ["bert", "xlnet"] else None
            )  # XLM and RoBERTa don't use token_type_ids

        outputs = self(**inputs)
        loss = outputs[0]
        # tensorboard_logs = {"loss": loss, "rate": self.lr_scheduler.get_last_lr()[-1]}
        return {"loss": loss}

    def prepare_data(self):
        "Called to initialize data. Use the call to construct features."
        args = self.hparams
        for mode in ["train", "dev", "test"]:
            cached_features_file = self._feature_file(mode)
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                logger.info("Loading features from cached file %s", cached_features_file)
                features = torch.load(cached_features_file)
            else:
                logger.info("Creating features from dataset file at %s", args.data_dir)
                examples = self.token_classification_task.read_examples_from_file(args.data_dir, mode)
                features = self.token_classification_task.convert_examples_to_features(
                    examples,
                    self.labels,
                    args.max_seq_length,
                    self.tokenizer,
                    cls_token_at_end=bool(self.config.model_type in ["xlnet"]),
                    cls_token=self.tokenizer.cls_token,
                    cls_token_segment_id=2 if self.config.model_type in ["xlnet"] else 0,
                    sep_token=self.tokenizer.sep_token,
                    sep_token_extra=False,
                    pad_on_left=bool(self.config.model_type in ["xlnet"]),
                    pad_token=self.tokenizer.pad_token_id,
                    pad_token_segment_id=self.tokenizer.pad_token_type_id,
                    pad_token_label_id=self.pad_token_label_id,
                )
                logger.info("Saving features into cached file %s", cached_features_file)
                torch.save(features, cached_features_file)

    def get_dataloader(self, mode: str, batch_size: int, shuffle: bool = False) -> DataLoader:
        "Load datasets. Called after prepare data."
        cached_features_file = self._feature_file(mode)
        logger.info("Loading features from cached file %s", cached_features_file)
        features = torch.load(cached_features_file)
        all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
        all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
        if features[0].token_type_ids is not None:
            all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
        else:
            all_token_type_ids = torch.tensor([0 for f in features], dtype=torch.long)
            # HACK(we will not use this anymore soon)
        all_label_ids = torch.tensor([f.label_ids for f in features], dtype=torch.long)
        return DataLoader(
            TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_label_ids),
            batch_size=batch_size,
        )

    def validation_step(self, batch, batch_nb):
        """Compute validation loss and predictions."""
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
        if self.config.model_type != "distilbert":
            inputs["token_type_ids"] = (
                batch[2] if self.config.model_type in ["bert", "xlnet"] else None
            )  # XLM and RoBERTa don't use token_type_ids
        outputs = self(**inputs)
        tmp_eval_loss, logits = outputs[:2]
        preds = logits.detach().cpu().numpy()
        out_label_ids = inputs["labels"].detach().cpu().numpy()
        return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}

    def _eval_end(self, outputs):
        val_loss_mean = torch.stack([x["val_loss"] for x in outputs]).mean()
        preds = np.concatenate([x["pred"] for x in outputs], axis=0)
        preds = np.argmax(preds, axis=2)
        out_label_ids = np.concatenate([x["target"] for x in outputs], axis=0)
        label_map = dict(enumerate(self.labels))
        out_label_list = [[] for _ in range(out_label_ids.shape[0])]
        preds_list = [[] for _ in range(out_label_ids.shape[0])]
        for i in range(out_label_ids.shape[0]):
            for j in range(out_label_ids.shape[1]):
                if out_label_ids[i, j] != self.pad_token_label_id:
                    out_label_list[i].append(label_map[out_label_ids[i][j]])
                    preds_list[i].append(label_map[preds[i][j]])

        results = {
            "val_loss": val_loss_mean,
            "accuracy_score": accuracy_score(out_label_list, preds_list),
            "precision": precision_score(out_label_list, preds_list),
            "recall": recall_score(out_label_list, preds_list),
            "f1": f1_score(out_label_list, preds_list),
        }

        ret = dict(results.items())
        ret["log"] = results
        return ret, preds_list, out_label_list

    def validation_epoch_end(self, outputs):
        # when stable
        ret, preds, targets = self._eval_end(outputs)
        logs = ret["log"]
        return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    def test_epoch_end(self, outputs):
        # updating to test_epoch_end instead of deprecated test_end
        ret, predictions, targets = self._eval_end(outputs)
        # Converting to the dict required by pl
        # https://github.com/PyTorchLightning/pytorch-lightning/blob/master/\
        # pytorch_lightning/trainer/logging.py#L139
        logs = ret["log"]
        # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
        return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    @staticmethod
    def add_model_specific_args(parser, root_dir):
        # Add NER specific options
        BaseTransformer.add_model_specific_args(parser, root_dir)
        parser.add_argument(
            "--task_type", default="NER", type=str, help="Task type to fine tune in training (e.g. NER, POS, etc)"
        )
        parser.add_argument(
            "--max_seq_length",
            default=128,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--labels",
            default="",
            type=str,
            help="Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.",
        )
        parser.add_argument(
            "--gpus",
            default=0,
            type=int,
            help="The number of GPUs allocated for this, it is by default 0 meaning none",
        )
        parser.add_argument(
            "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
        )
        return parser


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    add_generic_args(parser, os.getcwd())
    parser = NERTransformer.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()
    model = NERTransformer(args)
    trainer = generic_train(model, args)

    if args.do_predict:
        # See https://github.com/huggingface/transformers/issues/3159
        # pl use this default format to create a checkpoint:
        # https://github.com/PyTorchLightning/pytorch-lightning/blob/master\
        # /pytorch_lightning/callbacks/model_checkpoint.py#L322
        checkpoints = sorted(glob.glob(os.path.join(args.output_dir, "checkpoint-epoch=*.ckpt"), recursive=True))
        model = model.load_from_checkpoint(checkpoints[-1])
        trainer.test(model)
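# Example invocation (a sketch: the data directory, labels file and model name are
# placeholders, and the remaining flags come from add_generic_args/BaseTransformer):
#
#   python run_ner.py --data_dir ./conll2003 --labels ./labels.txt \
#       --model_name_or_path bert-base-cased --output_dir ./ner-model \
#       --max_seq_length 128 --do_train --do_predict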
"""simple docstring"""
import argparse
import json
import os
from collections import OrderedDict
import numpy as np
import tensorflow as tf
import torch
def convert_tf_gptsan_to_pt(args):
    parameter_file = os.path.join(args.tf_model_dir, "parameters.json")
    params = json.loads(open(parameter_file).read())
    if not params:
        raise ValueError(
            f"It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file."
        )
    if not args.output.endswith(".pt"):
        args.output = args.output + ".pt"
    new_state = OrderedDict()
    with tf.device("/CPU:0"):
        reader = tf.train.load_checkpoint(args.tf_model_dir)
        shapes = reader.get_variable_to_shape_map()
        for key_name in shapes.keys():
            vnp = reader.get_tensor(key_name).astype(np.float16)
            if key_name.endswith("/adam_m") or key_name.endswith("/adam_v"):
                continue
            if key_name.startswith("pasts/"):
                if key_name.startswith("pasts/mlp"):
                    player = int(key_name[9])
                elif key_name.startswith("pasts/out"):
                    player = 8
                name = "model.sqout.%d.weight" % (player * 2)  # enter to nn.Sequencial with Tanh, so 2 at a time
                state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/moe"):
                player = int(key_name[9:].split("/")[0])
                if key_name.endswith("/switch_gating/kernel"):
                    name = "model.blocks.%d.feed_forward.mlp.router.classifier.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/softmlp/kernel"):
                    name = "model.blocks.%d.feed_forward.soft_bypass_mlp.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/wo/kernel") or key_name.endswith("/wi/kernel"):
                    nlayer = key_name[-9:-7]
                    for i in range(16):
                        name = "model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight" % (player, i, nlayer)
                        state = (
                            vnp[i].transpose([1, 0]).copy()
                        )  # In Mesh-Tensorflow, it is one array, so it is divided
                        new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/mlp"):
                player = int(key_name[9:].split("/")[0])
                if key_name.endswith("/p1/kernel"):
                    name = "model.blocks.%d.feed_forward.mlp.wi.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/p1/bias"):
                    name = "model.blocks.%d.feed_forward.mlp.wi.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/p2/kernel"):
                    name = "model.blocks.%d.feed_forward.mlp.wo.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/p2/bias"):
                    name = "model.blocks.%d.feed_forward.mlp.wo.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/ln"):
                player = int(key_name[8:].split("/")[0])
                if key_name.endswith("/b"):
                    name = "model.blocks.%d.feed_forward.norm.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/g"):
                    name = "model.blocks.%d.feed_forward.norm.weight" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/att"):
                player = int(key_name[9:].split("/")[0])
                if key_name.endswith("/qkv/kernel"):
                    state = vnp.copy()  # Compute same dimension as Mesh-tensorflow using einsum
                    state_q = state[:, 0, :, :]
                    state_k = state[:, 1, :, :]
                    state_v = state[:, 2, :, :]
                    state_q = (
                        state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]])
                        .transpose([1, 0])
                        .copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    state_k = (
                        state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]])
                        .transpose([1, 0])
                        .copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    state_v = (
                        state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]])
                        .transpose([1, 0])
                        .copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    name = "model.blocks.%d.self_attn.self_attn.q_proj.weight" % player
                    new_state[name] = torch.tensor(state_q)
                    name = "model.blocks.%d.self_attn.self_attn.k_proj.weight" % player
                    new_state[name] = torch.tensor(state_k)
                    name = "model.blocks.%d.self_attn.self_attn.v_proj.weight" % player
                    new_state[name] = torch.tensor(state_v)
                elif key_name.endswith("/o/kernel"):
                    name = "model.blocks.%d.self_attn.self_attn.out_proj.weight" % player
                    state = (
                        vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]]).transpose([1, 0]).copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/an"):
                player = int(key_name[8:].split("/")[0])
                if key_name.endswith("/b"):
                    name = "model.blocks.%d.self_attn.norm.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/g"):
                    name = "model.blocks.%d.self_attn.norm.weight" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
            elif (
                key_name.startswith("model/wte")
                or key_name.startswith("model/wpe")
                or key_name.startswith("model/ete")
            ):
                nlayer = {"wte": "embed_tokens", "wpe": "position_embeddings", "ete": "extra_position_embeddings"}[
                    key_name[-3:]
                ]
                name = "model.%s.weight" % nlayer
                state = vnp.copy()  # same in embedded
                new_state[name] = torch.tensor(state)
                if key_name.startswith("model/wte"):
                    name = "lm_head.weight"
                    state = vnp.copy()  # same in embedded
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/wob"):
                name = "final_logits_bias"
                state = vnp.copy()  # same in embedded
                state = state.reshape((1, -1))
                new_state[name] = torch.tensor(state)
            elif key_name == "model/dense/kernel":
                name = "model.last_project.weight"
                state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                new_state[name] = torch.tensor(state)
            elif key_name == "model/dense_1/bias":
                name = "model.last_project.bias"
                state = vnp.copy()  # same because it is one dimensional
                new_state[name] = torch.tensor(state)
    torch.save(new_state, args.output)


if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="model converter.", formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )
    parser.add_argument("--tf_model_dir", metavar="PATH", type=str, required=True, help="import model")
    parser.add_argument("--output", metavar="PATH", type=str, required=True, help="output model")
    args = parser.parse_args()
    convert_tf_gptsan_to_pt(args)
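# Example invocation (a sketch; both paths are placeholders and the script file
# name is an assumption):
#
#   python convert_gptsan_tf_checkpoint_to_pytorch.py \
#       --tf_model_dir ./gptsan_tf_ckpt --output ./gptsan_model.pt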
"""simple docstring"""
import argparse
import os
import re
import zipfile
import torch
from transformers import AutoTokenizer, GPT2Config
def recursive_print(name, val, spaces=0):
    # Format the message.
    if name is None:
        msg = None
    else:
        fmt = "." * max(0, spaces - 2) + "# {:" + str(50 - spaces) + "s}"
        msg = fmt.format(name)

    # Print and recurse (if needed).
    if isinstance(val, dict):
        if msg is not None:
            print(msg)
        for k in val.keys():
            recursive_print(k, val[k], spaces + 2)
    elif isinstance(val, torch.Tensor):
        print(msg, ":", val.size())
    else:
        print(msg, ":", val)
def fix_query_key_value_ordering(param, checkpoint_version, num_splits, num_heads, hidden_size):
    # Permutes layout of param tensor to [num_splits * num_heads * hidden_size, :]
    # for compatibility with later versions of NVIDIA Megatron-LM.
    # The inverse operation is performed inside Megatron-LM to read checkpoints:
    # https://github.com/NVIDIA/Megatron-LM/blob/v2.4/megatron/checkpointing.py#L209
    # If param is the weight tensor of the self-attention block, the returned tensor
    # will have to be transposed one more time to be read by HuggingFace GPT2.
    input_shape = param.size()
    if checkpoint_version == 1.0:
        # version 1.0 stores [num_heads * hidden_size * num_splits, :]
        saved_shape = (num_heads, hidden_size, num_splits) + input_shape[1:]
        param = param.view(*saved_shape)
        param = param.transpose(0, 2)
        param = param.transpose(1, 2).contiguous()
    elif checkpoint_version >= 2.0:
        # other versions store [num_heads * num_splits * hidden_size, :]
        saved_shape = (num_heads, num_splits, hidden_size) + input_shape[1:]
        param = param.view(*saved_shape)
        param = param.transpose(0, 1).contiguous()
    param = param.view(*input_shape)
    return param
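# Worked shape example for fix_query_key_value_ordering (illustrative numbers, not
# from the original file): with checkpoint_version >= 2.0, num_splits=3, num_heads=2
# and hidden_size=4 per head, the fused QKV weight arrives as
# [num_heads * num_splits * hidden_size, :] = [2 * 3 * 4, :]. It is viewed as
# [2, 3, 4, :], the first two axes are swapped to [3, 2, 4, :], and the result is
# flattened back, so the rows end up ordered q (all heads), k (all heads),
# v (all heads) -- the [num_splits * num_heads * hidden_size, :] layout the GPT-2
# conversion below expects.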
def convert_megatron_checkpoint(args, input_state_dict, config):
    # The converted output model.
    output_state_dict = {}

    # old versions did not store training args
    ds_args = input_state_dict.get("args", None)
    if ds_args is not None:
        # do not make the user write a config file when the exact dimensions/sizes are already in the checkpoint
        # from pprint import pprint
        # pprint(vars(ds_args))

        config.vocab_size = ds_args.padded_vocab_size
        config.n_positions = ds_args.max_position_embeddings
        config.n_embd = ds_args.hidden_size
        config.n_layer = ds_args.num_layers
        config.n_head = ds_args.num_attention_heads
        config.n_inner = ds_args.ffn_hidden_size
        # pprint(config)

    # The number of heads.
    heads = config.n_head
    # The hidden_size per head.
    hidden_size_per_head = config.n_embd // config.n_head
    # Megatron-LM checkpoint version
    if "checkpoint_version" in input_state_dict.keys():
        checkpoint_version = input_state_dict["checkpoint_version"]
    else:
        checkpoint_version = 0.0

    # The model.
    model = input_state_dict["model"]
    # The language model.
    lm = model["language_model"]
    # The embeddings.
    embeddings = lm["embedding"]

    # The word embeddings.
    word_embeddings = embeddings["word_embeddings"]["weight"]
    # Truncate the embedding table to vocab_size rows.
    word_embeddings = word_embeddings[: config.vocab_size, :]
    output_state_dict["transformer.wte.weight"] = word_embeddings

    # The position embeddings.
    pos_embeddings = embeddings["position_embeddings"]["weight"]
    # Read the causal mask dimension (seqlen). [max_sequence_length, hidden_size]
    n_positions = pos_embeddings.size(0)
    if n_positions != config.n_positions:
        raise ValueError(
            f"pos_embeddings.max_sequence_length={n_positions} and config.n_positions={config.n_positions} don't match"
        )
    # Store the position embeddings.
    output_state_dict["transformer.wpe.weight"] = pos_embeddings

    # The transformer.
    transformer = lm["transformer"] if "transformer" in lm.keys() else lm["encoder"]

    # The regex to extract layer names.
    layer_re = re.compile(r"layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)")

    # The simple map of names for "automated" rules.
    megatron_to_transformers = {
        "attention.dense": ".attn.c_proj.",
        "self_attention.dense": ".attn.c_proj.",
        "mlp.dense_h_to_4h": ".mlp.c_fc.",
        "mlp.dense_4h_to_h": ".mlp.c_proj.",
    }

    # Extract the layers.
    for key, val in transformer.items():
        # Match the name.
        m = layer_re.match(key)

        # Stop if that's not a layer
        if m is None:
            break

        # The index of the layer.
        layer_idx = int(m.group(1))
        # The name of the operation.
        op_name = m.group(2)
        # Is it a weight or a bias?
        weight_or_bias = m.group(3)

        # The name of the layer.
        layer_name = f"transformer.h.{layer_idx}"

        # For layernorm(s), simply store the layer norm.
        if op_name.endswith("layernorm"):
            ln_name = "ln_1" if op_name.startswith("input") else "ln_2"
            output_state_dict[layer_name + "." + ln_name + "." + weight_or_bias] = val

        # Transpose the QKV matrix.
        elif (
            op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
        ) and weight_or_bias == "weight":
            # Insert a tensor of 1x1xDxD bias.
            causal_mask = torch.tril(torch.ones((n_positions, n_positions), dtype=torch.float16)).view(
                1, 1, n_positions, n_positions
            )
            output_state_dict[layer_name + ".attn.bias"] = causal_mask

            # Insert a "dummy" tensor for masked_bias.
            masked_bias = torch.tensor(-1e4, dtype=torch.float16)
            output_state_dict[layer_name + ".attn.masked_bias"] = masked_bias

            out_val = fix_query_key_value_ordering(val, checkpoint_version, 3, heads, hidden_size_per_head)
            # Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D.
            out_val = out_val.transpose(0, 1).contiguous()
            # Store.
            output_state_dict[layer_name + ".attn.c_attn.weight"] = out_val

        # Transpose the bias.
        elif (
            op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
        ) and weight_or_bias == "bias":
            out_val = fix_query_key_value_ordering(val, checkpoint_version, 3, heads, hidden_size_per_head)
            # Store. No change of shape.
            output_state_dict[layer_name + ".attn.c_attn.bias"] = out_val

        # Transpose the weights.
        elif weight_or_bias == "weight":
            out_name = megatron_to_transformers[op_name]
            output_state_dict[layer_name + out_name + "weight"] = val.transpose(0, 1)

        # Copy the bias.
        elif weight_or_bias == "bias":
            out_name = megatron_to_transformers[op_name]
            output_state_dict[layer_name + out_name + "bias"] = val

    # DEBUG.
    assert config.n_layer == layer_idx + 1

    # The final layernorm.
    output_state_dict["transformer.ln_f.weight"] = transformer["final_layernorm.weight"]
    output_state_dict["transformer.ln_f.bias"] = transformer["final_layernorm.bias"]

    # For LM head, transformers' wants the matrix to weight embeddings.
    output_state_dict["lm_head.weight"] = word_embeddings

    # It should be done!
    return output_state_dict
def main():
    # Create the argument parser.
    parser = argparse.ArgumentParser()
    parser.add_argument("--print-checkpoint-structure", action="store_true")
    parser.add_argument(
        "path_to_checkpoint",
        type=str,
        help="Path to the checkpoint file (.zip archive or direct .pt file)",
    )
    parser.add_argument(
        "--config_file",
        default="",
        type=str,
        help="An optional config json file describing the pre-trained model.",
    )
    args = parser.parse_args()

    # Extract the basename.
    basename = os.path.dirname(args.path_to_checkpoint)

    # Load the model.
    # the .zip is very optional, let's keep it for backward compatibility
    print(f"Extracting PyTorch state dictionary from {args.path_to_checkpoint}")
    if args.path_to_checkpoint.endswith(".zip"):
        with zipfile.ZipFile(args.path_to_checkpoint, "r") as checkpoint:
            with checkpoint.open("release/mp_rank_00/model_optim_rng.pt") as pytorch_dict:
                input_state_dict = torch.load(pytorch_dict, map_location="cpu")
    else:
        input_state_dict = torch.load(args.path_to_checkpoint, map_location="cpu")

    ds_args = input_state_dict.get("args", None)

    # Read the config, or default to the model released by NVIDIA.
    if args.config_file == "":
        if ds_args is not None:
            if ds_args.bias_gelu_fusion:
                activation_function = "gelu_fast"
            elif ds_args.openai_gelu:
                activation_function = "gelu_new"
            else:
                activation_function = "gelu"
        else:
            # in the very early days this used to be "gelu_new"
            activation_function = "gelu_new"

        # Spell out all parameters in case the defaults change.
        config = GPT2Config(
            vocab_size=50257,
            n_positions=1024,
            n_embd=1024,
            n_layer=24,
            n_head=16,
            n_inner=4096,
            activation_function=activation_function,
            resid_pdrop=0.1,
            embd_pdrop=0.1,
            attn_pdrop=0.1,
            layer_norm_epsilon=1e-5,
            initializer_range=0.02,
            summary_type="cls_index",
            summary_use_proj=True,
            summary_activation=None,
            summary_proj_to_labels=True,
            summary_first_dropout=0.1,
            scale_attn_weights=True,
            use_cache=True,
            bos_token_id=50256,
            eos_token_id=50256,
        )
    else:
        config = GPT2Config.from_json_file(args.config_file)

    config.architectures = ["GPT2LMHeadModel"]

    # Convert.
    print("Converting")
    output_state_dict = convert_megatron_checkpoint(args, input_state_dict, config)

    # Print the structure of converted state dict.
    if args.print_checkpoint_structure:
        recursive_print(None, output_state_dict)

    # Add tokenizer class info to config
    # see https://github.com/huggingface/transformers/issues/13906)
    if ds_args is not None:
        tokenizer_type = ds_args.tokenizer_type
        if tokenizer_type == "GPT2BPETokenizer":
            tokenizer_model_name = "gpt2"
        elif tokenizer_type == "PretrainedFromHF":
            tokenizer_model_name = ds_args.tokenizer_name_or_path
        else:
            raise ValueError(f"Unrecognized tokenizer_type {tokenizer_type}")
    else:
        tokenizer_model_name = "gpt2"

    tokenizer = AutoTokenizer.from_pretrained(tokenizer_model_name)
    tokenizer_class = type(tokenizer).__name__
    config.tokenizer_class = tokenizer_class

    # Store the config to file.
    print("Saving config")
    config.save_pretrained(basename)

    # Save tokenizer based on args
    print(f"Adding {tokenizer_class} tokenizer files")
    tokenizer.save_pretrained(basename)

    # Store the state_dict to file.
    output_checkpoint_file = os.path.join(basename, "pytorch_model.bin")
    print(f'Saving checkpoint to "{output_checkpoint_file}"')
    torch.save(output_state_dict, output_checkpoint_file)
####################################################################################################
if __name__ == "__main__":
main()
####################################################################################################
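# Example invocation (a sketch; the checkpoint path is a placeholder and the file
# name assumes this script is saved as convert_megatron_gpt2_checkpoint.py):
#
#   python convert_megatron_gpt2_checkpoint.py --print-checkpoint-structure \
#       path/to/megatron/checkpoint.zip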
"""simple docstring"""
import warnings
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
from ...models import UNet2DModel
from ...schedulers import RePaintScheduler
from ...utils import PIL_INTERPOLATION, logging, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
def _preprocess_image(image: Union[List, PIL.Image.Image, torch.Tensor]):
    warnings.warn(
        "The preprocess method is deprecated and will be removed in a future version. Please"
        " use VaeImageProcessor.preprocess instead",
        FutureWarning,
    )
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]

    if isinstance(image[0], PIL.Image.Image):
        w, h = image[0].size
        w, h = (x - x % 8 for x in (w, h))  # resize to integer multiple of 8

        image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image]
        image = np.concatenate(image, axis=0)
        image = np.array(image).astype(np.float32) / 255.0
        image = image.transpose(0, 3, 1, 2)
        image = 2.0 * image - 1.0
        image = torch.from_numpy(image)
    elif isinstance(image[0], torch.Tensor):
        image = torch.cat(image, dim=0)
    return image
def _preprocess_mask(mask: Union[List, PIL.Image.Image, torch.Tensor]):
    if isinstance(mask, torch.Tensor):
        return mask
    elif isinstance(mask, PIL.Image.Image):
        mask = [mask]

    if isinstance(mask[0], PIL.Image.Image):
        w, h = mask[0].size
        w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
        mask = [np.array(m.convert("L").resize((w, h), resample=PIL_INTERPOLATION["nearest"]))[None, :] for m in mask]
        mask = np.concatenate(mask, axis=0)
        mask = mask.astype(np.float32) / 255.0
        mask[mask < 0.5] = 0
        mask[mask >= 0.5] = 1
        mask = torch.from_numpy(mask)
    elif isinstance(mask[0], torch.Tensor):
        mask = torch.cat(mask, dim=0)
    return mask
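# Shape notes for the two helpers above (illustrative): a PIL image is resized to a
# multiple of 8, rescaled to [-1, 1] and returned as an NCHW float tensor with a
# leading batch axis; a PIL mask is converted to grayscale, resized to a multiple
# of 32 and thresholded at 0.5 into hard 0/1 values.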
class RePaintPipeline(DiffusionPipeline):
    unet: UNet2DModel
    scheduler: RePaintScheduler

    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        image: Union[torch.Tensor, PIL.Image.Image],
        mask_image: Union[torch.Tensor, PIL.Image.Image],
        num_inference_steps: int = 250,
        eta: float = 0.0,
        jump_length: int = 10,
        jump_n_sample: int = 10,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[ImagePipelineOutput, Tuple]:
        original_image = image

        original_image = _preprocess_image(original_image)
        original_image = original_image.to(device=self.device, dtype=self.unet.dtype)
        mask_image = _preprocess_mask(mask_image)
        mask_image = mask_image.to(device=self.device, dtype=self.unet.dtype)

        batch_size = original_image.shape[0]

        # sample gaussian noise to begin the loop
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        image_shape = original_image.shape
        image = randn_tensor(image_shape, generator=generator, device=self.device, dtype=self.unet.dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps, jump_length, jump_n_sample, self.device)
        self.scheduler.eta = eta

        t_last = self.scheduler.timesteps[0] + 1
        generator = generator[0] if isinstance(generator, list) else generator
        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
            if t < t_last:
                # predict the noise residual
                model_output = self.unet(image, t).sample
                # compute previous image: x_t -> x_t-1
                image = self.scheduler.step(model_output, t, image, original_image, mask_image, generator).prev_sample
            else:
                # compute the reverse: x_t-1 -> x_t
                image = self.scheduler.undo_step(image, t_last, generator)
            t_last = t

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
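# Usage sketch (illustrative; the checkpoint id, the input image and the mask are
# assumptions, not part of this file):
#
#   from diffusers import RePaintPipeline, RePaintScheduler
#
#   scheduler = RePaintScheduler.from_pretrained("google/ddpm-ema-celebahq-256")
#   pipe = RePaintPipeline.from_pretrained("google/ddpm-ema-celebahq-256", scheduler=scheduler)
#   output = pipe(image=original_image, mask_image=mask_image, num_inference_steps=250)
#   output.images[0].save("inpainted.png")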
import argparse
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.'
)
parser.add_argument(
'--original_config_file',
type=str,
required=True,
help='The YAML config file corresponding to the original architecture.',
)
parser.add_argument(
'--num_in_channels',
default=None,
type=int,
help='The number of input channels. If `None` number of input channels will be automatically inferred.',
)
parser.add_argument(
'--image_size',
default=512,
type=int,
help=(
            'The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Diffusion v2'
' Base. Use 768 for Stable Diffusion v2.'
),
)
parser.add_argument(
'--extract_ema',
action='store_true',
help=(
'Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights'
' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield'
' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.'
),
)
parser.add_argument(
'--upcast_attention',
action='store_true',
help=(
'Whether the attention computation should always be upcasted. This is necessary when running stable'
' diffusion 2.1.'
),
)
parser.add_argument(
'--from_safetensors',
action='store_true',
help='If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.',
)
parser.add_argument(
'--to_safetensors',
action='store_true',
help='Whether to store pipeline in safetensors format or not.',
)
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
parser.add_argument('--device', type=str, help='Device to use (e.g. cpu, cuda:0, cuda:1, etc.)')
    def parse_bool(string):
        if string == "True":
            return True
        elif string == "False":
            return False
        else:
            raise ValueError(f'could not parse string as bool {string}')
parser.add_argument(
'--use_linear_projection', help='Override for use linear projection', required=False, type=parse_bool
)
parser.add_argument('--cross_attention_dim', help='Override for cross attention_dim', required=False, type=int)
    args = parser.parse_args()

    controlnet = download_controlnet_from_original_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
extract_ema=args.extract_ema,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
use_linear_projection=args.use_linear_projection,
cross_attention_dim=args.cross_attention_dim,
)
controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
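# Example invocation (a sketch; all paths are placeholders and the script file name
# is an assumption):
#
#   python convert_original_controlnet_to_diffusers.py \
#       --checkpoint_path ./control_sd15_canny.pth \
#       --original_config_file ./cldm_v15.yaml \
#       --dump_path ./controlnet-canny --to_safetensors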
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
logger = logging.get_logger(__name__)


class BeitFeatureExtractor(BeitImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            'The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
            ' use BeitImageProcessor instead.',
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
def merge_sort(collection: list) -> list:
    start, end = [], []
    while len(collection) > 1:
        min_one, max_one = min(collection), max(collection)
        start.append(min_one)
        end.append(max_one)
        collection.remove(min_one)
        collection.remove(max_one)
    end.reverse()
    return start + collection + end
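# Worked example (not part of the original script): each pass peels off the current
# minimum and maximum, so the list is reassembled in sorted order:
#     merge_sort([1, 5, 2, 4, 3])  ->  [1, 2] + [3] + [4, 5]  ->  [1, 2, 3, 4, 5]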
if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item) for item in user_input.split(',')]
print(*merge_sort(unsorted), sep=',')
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {'configuration_gpt_neox': ['GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GPTNeoXConfig']}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_gpt_neox_fast'] = ['GPTNeoXTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_gpt_neox'] = [
'GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST',
'GPTNeoXForCausalLM',
'GPTNeoXForQuestionAnswering',
'GPTNeoXForSequenceClassification',
'GPTNeoXForTokenClassification',
'GPTNeoXLayer',
'GPTNeoXModel',
'GPTNeoXPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_gpt_neox import GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_gpt_neox_fast import GPTNeoXTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox import (
GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXLayer,
GPTNeoXModel,
GPTNeoXPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class LayoutLMv3Processor(ProcessorMixin):
    r"""
    Constructs a LayoutLMv3 processor which combines a LayoutLMv3 image processor and a LayoutLMv3 tokenizer into a
    single processor.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv3ImageProcessor"
    tokenizer_class = ("LayoutLMv3Tokenizer", "LayoutLMv3TokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None,
        boxes: Union[List[List[int]], List[List[List[int]]]] = None,
        word_labels: Optional[Union[List[int], List[List[int]]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True."
            )

        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True."
            )

        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)

        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]

        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["words"],
            text_pair=text_pair if text_pair is not None else None,
            boxes=boxes if boxes is not None else features["boxes"],
            word_labels=word_labels,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )

        # add pixel values
        images = features.pop("pixel_values")
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images, encoded_inputs["overflow_to_sample_mapping"])
        encoded_inputs["pixel_values"] = images

        return encoded_inputs

    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])

        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                f" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}"
            )

        return images_with_overflow

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "bbox", "attention_mask", "pixel_values"]

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
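# Usage sketch (illustrative; the model id and the input image are assumptions):
#
#   from transformers import LayoutLMv3Processor
#
#   processor = LayoutLMv3Processor.from_pretrained("microsoft/layoutlmv3-base")
#   encoding = processor(image, return_tensors="pt")  # OCR runs inside the image processor
#   # encoding then contains "input_ids", "bbox", "attention_mask", "pixel_values"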
import unittest
from datasets import load_dataset
from transformers.pipelines import pipeline
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow
@is_pipeline_test
@require_torch
class ZeroShotAudioClassificationPipelineTests(unittest.TestCase):
    @require_torch
    def test_small_model_pt(self):
        audio_classifier = pipeline(
            task="zero-shot-audio-classification", model="hf-internal-testing/tiny-clap-htsat-unfused"
        )
        dataset = load_dataset("ashraq/esc50")
        audio = dataset["train"]["audio"][-1]["array"]
        output = audio_classifier(audio, candidate_labels=["Sound of a dog", "Sound of vacuum cleaner"])
        self.assertEqual(
            nested_simplify(output),
            [{"score": 0.501, "label": "Sound of a dog"}, {"score": 0.499, "label": "Sound of vacuum cleaner"}],
        )

    @unittest.skip("No models are available in TF")
    def test_small_model_tf(self):
        pass

    @slow
    @require_torch
    def test_large_model_pt(self):
        audio_classifier = pipeline(
            task="zero-shot-audio-classification",
            model="laion/clap-htsat-unfused",
        )
        # This is an audio of a dog
        dataset = load_dataset("ashraq/esc50")
        audio = dataset["train"]["audio"][-1]["array"]
        output = audio_classifier(audio, candidate_labels=["Sound of a dog", "Sound of vacuum cleaner"])
        self.assertEqual(
            nested_simplify(output),
            [
                {"score": 0.999, "label": "Sound of a dog"},
                {"score": 0.001, "label": "Sound of vacuum cleaner"},
            ],
        )
        output = audio_classifier([audio] * 5, candidate_labels=["Sound of a dog", "Sound of vacuum cleaner"])
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.999, "label": "Sound of a dog"},
                    {"score": 0.001, "label": "Sound of vacuum cleaner"},
                ],
            ]
            * 5,
        )
        output = audio_classifier(
            [audio] * 5, candidate_labels=["Sound of a dog", "Sound of vacuum cleaner"], batch_size=5
        )
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.999, "label": "Sound of a dog"},
                    {"score": 0.001, "label": "Sound of vacuum cleaner"},
                ],
            ]
            * 5,
        )

    @unittest.skip("No models are available in TF")
    def test_large_model_tf(self):
        pass
"""simple docstring"""
import os
import sys
SRC_DIR = os.path.join(os.path.dirname(__file__), '''src''')
sys.path.append(SRC_DIR)
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoTokenizer,
add_start_docstrings,
)
dependencies = [
'''torch''',
'''numpy''',
'''tokenizers''',
'''filelock''',
'''requests''',
'''tqdm''',
'''regex''',
'''sentencepiece''',
'''sacremoses''',
'''importlib_metadata''',
'''huggingface_hub''',
]
@add_start_docstrings(AutoConfig.__doc__)
def config(*args, **kwargs):
    return AutoConfig.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoTokenizer.__doc__)
def tokenizer(*args, **kwargs):
    return AutoTokenizer.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModel.__doc__)
def model(*args, **kwargs):
    return AutoModel.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForCausalLM.__doc__)
def modelForCausalLM(*args, **kwargs):
    return AutoModelForCausalLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForMaskedLM.__doc__)
def modelForMaskedLM(*args, **kwargs):
    return AutoModelForMaskedLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForSequenceClassification.__doc__)
def modelForSequenceClassification(*args, **kwargs):
    return AutoModelForSequenceClassification.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForQuestionAnswering.__doc__)
def modelForQuestionAnswering(*args, **kwargs):
    return AutoModelForQuestionAnswering.from_pretrained(*args, **kwargs)
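# These functions are torch.hub-style entry points; a usage sketch (the repo slug
# and model name are illustrative, not confirmed by this file):
#
#   import torch
#
#   tok = torch.hub.load("huggingface/transformers", "tokenizer", "bert-base-uncased")
#   mdl = torch.hub.load("huggingface/transformers", "model", "bert-base-uncased")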
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    '''configuration_llama''': ['''LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LlamaConfig'''],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_llama'''] = ['''LlamaTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_llama_fast'''] = ['''LlamaTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_llama'''] = [
'''LlamaForCausalLM''',
'''LlamaModel''',
'''LlamaPreTrainedModel''',
'''LlamaForSequenceClassification''',
]
if TYPE_CHECKING:
from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama import LlamaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama_fast import LlamaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
def log_results(result: Dataset, args: Dict[str, str]):
    log_outputs = args.log_outputs
    dataset_id = "_".join(args.dataset.split("/") + [args.config, args.split])

    # load metric
    wer = load_metric("wer")
    cer = load_metric("cer")

    # compute metrics
    wer_result = wer.compute(references=result["target"], predictions=result["prediction"])
    cer_result = cer.compute(references=result["target"], predictions=result["prediction"])

    # print & log results
    result_str = f"WER: {wer_result}\nCER: {cer_result}"
    print(result_str)

    with open(f"{dataset_id}_eval_results.txt", "w") as f:
        f.write(result_str)

    # log all results in text file. Possibly interesting for analysis
    if log_outputs is not None:
        pred_file = f"log_{dataset_id}_predictions.txt"
        target_file = f"log_{dataset_id}_targets.txt"

        with open(pred_file, "w") as p, open(target_file, "w") as t:
            # mapping function to write output
            def write_to_file(batch, i):
                p.write(f"{i}" + "\n")
                p.write(batch["prediction"] + "\n")
                t.write(f"{i}" + "\n")
                t.write(batch["target"] + "\n")

            result.map(write_to_file, with_indices=True)


def normalize_text(text: str) -> str:
    chars_to_ignore_regex = '[,?.!\-\;\:"“%‘”�—’…–]'  # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training
    text = re.sub(chars_to_ignore_regex, "", text.lower())

    # In addition, we can normalize the target text, e.g. removing new lines characters etc...
    # note that order is important here!
    token_sequences_to_ignore = ["\n\n", "\n", "   ", "  "]
    for t in token_sequences_to_ignore:
        text = " ".join(text.split(t))

    return text


def main(args):
    dataset = load_dataset(args.dataset, args.config, split=args.split, use_auth_token=True)

    # for testing: only process the first two examples as a test
    # dataset = dataset.select(range(10))

    # load processor
    feature_extractor = AutoFeatureExtractor.from_pretrained(args.model_id)
    sampling_rate = feature_extractor.sampling_rate

    # resample audio
    dataset = dataset.cast_column("audio", Audio(sampling_rate=sampling_rate))

    # load eval pipeline
    if args.device is None:
        args.device = 0 if torch.cuda.is_available() else -1
    asr = pipeline("automatic-speech-recognition", model=args.model_id, device=args.device)

    # map function to decode audio
    def map_to_pred(batch):
        prediction = asr(
            batch["audio"]["array"], chunk_length_s=args.chunk_length_s, stride_length_s=args.stride_length_s
        )

        batch["prediction"] = prediction["text"]
        batch["target"] = normalize_text(batch["sentence"])
        return batch

    # run inference on all examples
    result = dataset.map(map_to_pred, remove_columns=dataset.column_names)

    # compute and log_results
    # do not change function below
    log_results(result, args)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--model_id''', type=str, required=True, help='''Model identifier. Should be loadable with 🤗 Transformers'''
)
parser.add_argument(
'''--dataset''',
type=str,
required=True,
help='''Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets''',
)
parser.add_argument(
'''--config''', type=str, required=True, help='''Config of the dataset. *E.g.* `\'en\'` for Common Voice'''
)
parser.add_argument('''--split''', type=str, required=True, help='''Split of the dataset. *E.g.* `\'test\'`''')
parser.add_argument(
'''--chunk_length_s''', type=float, default=None, help='''Chunk length in seconds. Defaults to 5 seconds.'''
)
parser.add_argument(
'''--stride_length_s''', type=float, default=None, help='''Stride of the audio chunks. Defaults to 1 second.'''
)
parser.add_argument(
'''--log_outputs''', action='''store_true''', help='''If defined, write outputs to log file for analysis.'''
)
parser.add_argument(
'''--device''',
type=int,
default=None,
help='''The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.''',
)
    args = parser.parse_args()
main(args)
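# Example invocation (a sketch; the model and dataset ids are placeholders and the
# script file name is an assumption):
#
#   python eval.py --model_id facebook/wav2vec2-base-960h \
#       --dataset mozilla-foundation/common_voice_7_0 --config en --split test --log_outputs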
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("""TEST_SAGEMAKER""" , """False""")) is not True , reason="""Skipping test because should only be run when releasing minor transformers version""" , )
@pytest.mark.usefixtures("""sm_env""")
@parameterized_class(
[
{
"""framework""": """pytorch""",
"""script""": """run_glue.py""",
"""model_name_or_path""": """distilbert-base-cased""",
"""instance_type""": """ml.p3.16xlarge""",
"""results""": {"""train_runtime""": 650, """eval_accuracy""": 0.7, """eval_loss""": 0.6},
},
{
"""framework""": """pytorch""",
"""script""": """run_ddp.py""",
"""model_name_or_path""": """distilbert-base-cased""",
"""instance_type""": """ml.p3.16xlarge""",
"""results""": {"""train_runtime""": 600, """eval_accuracy""": 0.7, """eval_loss""": 0.6},
},
{
"""framework""": """tensorflow""",
"""script""": """run_tf_dist.py""",
"""model_name_or_path""": """distilbert-base-cased""",
"""instance_type""": """ml.p3.16xlarge""",
"""results""": {"""train_runtime""": 600, """eval_accuracy""": 0.6, """eval_loss""": 0.7},
},
])
class MultiNodeTest(unittest.TestCase):
    def setUp(self):
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(),
                encoding="utf-8",
                check=True,
            )
        assert hasattr(self, "env")

    def create_estimator(self, instance_count):
        job_name = f"{self.env.base_job_name}-{instance_count}-{'ddp' if 'ddp' in self.script else 'smd'}"
        # distributed data settings
        distribution = {"smdistributed": {"dataparallel": {"enabled": True}}} if self.script != "run_ddp.py" else None
        # creates estimator
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=job_name,
            instance_count=instance_count,
            instance_type=self.instance_type,
            debugger_hook_config=False,
            hyperparameters={**self.env.distributed_hyperparameters, "model_name_or_path": self.model_name_or_path},
            metric_definitions=self.env.metric_definitions,
            distribution=distribution,
            py_version="py36",
        )

    def save_results_as_csv(self, job_name):
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")

    @parameterized.expand([(2,)])
    def test_script(self, instance_count):
        estimator = self.create_estimator(instance_count)
        # run training
        estimator.fit()
        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()
        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999)
        )
        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)
        # dump tests result into json file to share in PR
        with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
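# A sketch of how this suite is typically launched (the pytest path is illustrative,
# not taken from this file):
#   TEST_SAGEMAKER=True python -m pytest -s tests/sagemaker
# Each entry in the parameterized_class list above becomes its own test class, so the
# same test_script body runs once per framework/script combination.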
import argparse
import json
import os
import torch
from transformers.file_utils import has_file
from diffusers import UNet2DConditionModel, UNet2DModel

do_only_config = False
do_only_weights = True
do_only_renaming = False
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--repo_path',
        default=None,
        type=str,
        required=True,
        help='The config json file corresponding to the architecture.',
    )
    parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
    args = parser.parse_args()
    config_parameters_to_change = {
        'image_size': 'sample_size',
        'num_res_blocks': 'layers_per_block',
        'block_channels': 'block_out_channels',
        'down_blocks': 'down_block_types',
        'up_blocks': 'up_block_types',
        'downscale_freq_shift': 'freq_shift',
        'resnet_num_groups': 'norm_num_groups',
        'resnet_act_fn': 'act_fn',
        'resnet_eps': 'norm_eps',
        'num_head_channels': 'attention_head_dim',
    }
    key_parameters_to_change = {
        'time_steps': 'time_proj',
        'mid': 'mid_block',
        'downsample_blocks': 'down_blocks',
        'upsample_blocks': 'up_blocks',
    }
    subfolder = '' if has_file(args.repo_path, 'config.json') else 'unet'
    with open(os.path.join(args.repo_path, subfolder, 'config.json'), 'r', encoding='utf-8') as reader:
        text = reader.read()
        config = json.loads(text)
    if do_only_config:
        for key in config_parameters_to_change.keys():
            config.pop(key, None)
    if has_file(args.repo_path, 'config.json'):
        model = UNet2DModel(**config)
    else:
        class_name = UNet2DConditionModel if 'ldm-text2im-large-256' in args.repo_path else UNet2DModel
        model = class_name(**config)
    if do_only_config:
        model.save_config(os.path.join(args.repo_path, subfolder))
    config = dict(model.config)
    if do_only_renaming:
        for key, value in config_parameters_to_change.items():
            if key in config:
                # move the value over to the new diffusers parameter name
                config[value] = config[key]
                del config[key]
        config['down_block_types'] = [k.replace('UNetRes', '') for k in config['down_block_types']]
        config['up_block_types'] = [k.replace('UNetRes', '') for k in config['up_block_types']]
    if do_only_weights:
        state_dict = torch.load(os.path.join(args.repo_path, subfolder, 'diffusion_pytorch_model.bin'))
        new_state_dict = {}
        for param_key, param_value in state_dict.items():
            if param_key.endswith('.op.bias') or param_key.endswith('.op.weight'):
                continue
            has_changed = False
            for key, new_key in key_parameters_to_change.items():
                if not has_changed and param_key.split('.')[0] == key:
                    # rename the leading module, keep the rest of the parameter path
                    new_state_dict['.'.join([new_key] + param_key.split('.')[1:])] = param_value
                    has_changed = True
            if not has_changed:
                new_state_dict[param_key] = param_value
        model.load_state_dict(new_state_dict)
        model.save_pretrained(os.path.join(args.repo_path, subfolder))
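# Example invocation (a sketch; the script and repo names are illustrative):
#   python change_naming_configs_and_checkpoints.py --repo_path ./ddpm-cifar10-32 --dump_path ./out
# Toggle do_only_config / do_only_renaming / do_only_weights above to choose which of
# the three migration steps (config pruning, key renaming, weight renaming) runs.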
import argparse
import logging
import os
import time
import timeit
import datasets
import numpy as np
import pycuda.autoinit # noqa: F401
import pycuda.driver as cuda
import tensorrt as trt
import torch
from absl import logging as absl_logging
from accelerate import Accelerator
from datasets import load_dataset, load_metric
from torch.utils.data import DataLoader
from utils_qa import postprocess_qa_predictions
import transformers
from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed
from transformers.trainer_pt_utils import nested_concat, nested_truncate
TRT_LOGGER = trt.Logger(trt.Logger.WARNING)
absl_logger = absl_logging.get_absl_logger()
absl_logger.setLevel(logging.WARNING)
logger = logging.getLogger(__name__)
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--onnx_model_path',
default=None,
type=str,
required=True,
help='Path to ONNX model: ',
)
parser.add_argument(
'--output_dir',
default=None,
type=str,
required=True,
help='The output directory where the model checkpoints and predictions will be written.',
)
# Other parameters
parser.add_argument(
'--tokenizer_name',
default='',
type=str,
required=True,
help='Pretrained tokenizer name or path if not the same as model_name',
)
parser.add_argument(
'--version_2_with_negative',
action='store_true',
help='If true, the SQuAD examples contain some that do not have an answer.',
)
parser.add_argument(
'--null_score_diff_threshold',
type=float,
default=0.0,
help='If null_score - best_non_null is greater than the threshold predict null.',
)
parser.add_argument(
'--max_seq_length',
default=384,
type=int,
help=(
'The maximum total input sequence length after WordPiece tokenization. Sequences '
'longer than this will be truncated, and sequences shorter than this will be padded.'
),
)
parser.add_argument(
'--doc_stride',
default=128,
type=int,
help='When splitting up a long document into chunks, how much stride to take between chunks.',
)
parser.add_argument('--per_device_eval_batch_size', default=8, type=int, help='Batch size per GPU/CPU for evaluation.')
parser.add_argument(
'--n_best_size',
default=20,
type=int,
help='The total number of n-best predictions to generate in the nbest_predictions.json output file.',
)
parser.add_argument(
'--max_answer_length',
default=30,
type=int,
help=(
'The maximum length of an answer that can be generated. This is needed because the start '
'and end predictions are not conditioned on one another.'
),
)
parser.add_argument('--seed', type=int, default=42, help='random seed for initialization')
parser.add_argument(
'--dataset_name',
type=str,
default=None,
required=True,
help='The name of the dataset to use (via the datasets library).',
)
parser.add_argument(
'--dataset_config_name',
type=str,
default=None,
help='The configuration name of the dataset to use (via the datasets library).',
)
parser.add_argument(
    '--preprocessing_num_workers', type=int, default=4, help='Number of processes to use for preprocessing.'
)
parser.add_argument('--overwrite_cache', action='store_true', help='Overwrite the cached training and evaluation sets')
parser.add_argument(
'--fp16',
action='store_true',
help='Whether to use 16-bit (mixed) precision instead of 32-bit',
)
parser.add_argument(
'--int8',
action='store_true',
help='Whether to use INT8',
)
args = parser.parse_args()
if args.tokenizer_name:
    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True)
else:
    raise ValueError(
        'You are instantiating a new tokenizer from scratch. This is not supported by this script. '
        'You can do it from another script, save it, and load it from here, using --tokenizer_name.'
    )
logger.info('Training/evaluation parameters %s', args)
args.eval_batch_size = args.per_device_eval_batch_size
INPUT_SHAPE = (args.eval_batch_size, args.max_seq_length)
# TRT Engine properties
STRICT_TYPES = True
engine_name = 'temp_engine/bert-fp32.engine'
if args.fp16:
    engine_name = 'temp_engine/bert-fp16.engine'
if args.int8:
    engine_name = 'temp_engine/bert-int8.engine'
# import ONNX file
if not os.path.exists('temp_engine'):
os.makedirs('temp_engine')
EXPLICIT_BATCH = 1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser(
network, TRT_LOGGER
) as parser:
with open(args.onnx_model_path, 'rb') as model:
if not parser.parse(model.read()):
for error in range(parser.num_errors):
print(parser.get_error(error))
# Query input names and shapes from parsed TensorRT network
    network_inputs = [network.get_input(i) for i in range(network.num_inputs)]
    input_names = [_input.name for _input in network_inputs]  # ex: ["actual_input1"]
    with builder.create_builder_config() as config:
        config.max_workspace_size = 1 << 50
        if STRICT_TYPES:
            config.set_flag(trt.BuilderFlag.STRICT_TYPES)
        if args.fp16:
            config.set_flag(trt.BuilderFlag.FP16)
        if args.int8:
            config.set_flag(trt.BuilderFlag.INT8)
        profile = builder.create_optimization_profile()
        config.add_optimization_profile(profile)
        for i in range(len(input_names)):
            profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE)
        engine = builder.build_engine(network, config)
# serialize_engine and store in file (can be directly loaded and deserialized):
with open(engine_name, 'wb') as f:
f.write(engine.serialize())
def model_infer(inputs, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream):
    input_ids = np.asarray(inputs['input_ids'], dtype=np.int32)
    attention_mask = np.asarray(inputs['attention_mask'], dtype=np.int32)
    token_type_ids = np.asarray(inputs['token_type_ids'], dtype=np.int32)
    # Copy inputs
    cuda.memcpy_htod_async(d_inputs[0], input_ids.ravel(), stream)
    cuda.memcpy_htod_async(d_inputs[1], attention_mask.ravel(), stream)
    cuda.memcpy_htod_async(d_inputs[2], token_type_ids.ravel(), stream)
    # start time
    start_time = time.time()
    # Run inference
    context.execute_async(
        bindings=[int(d_inp) for d_inp in d_inputs] + [int(d_output0), int(d_output1)], stream_handle=stream.handle
    )
    # Transfer predictions back from GPU
    cuda.memcpy_dtoh_async(h_output0, d_output0, stream)
    cuda.memcpy_dtoh_async(h_output1, d_output1, stream)
    # Synchronize the stream and take time
    stream.synchronize()
    # end time
    end_time = time.time()
    infer_time = end_time - start_time
    outputs = (h_output0, h_output1)
    return outputs, infer_time
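# Note: the host-to-device copies, the inference launch and the device-to-host copies
# above are all enqueued asynchronously on the same CUDA stream, so the single
# stream.synchronize() call is what guarantees h_output0/h_output1 hold valid logits
# before they are read on the host.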
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
accelerator = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO,
)
# Setup logging, we only want one process per machine to log things on the screen.
# accelerator.is_local_main_process is only True for one process per machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
if args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
    raw_datasets = load_dataset(args.dataset_name, args.dataset_config_name)
else:
raise ValueError('Evaluation requires a dataset name')
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Preprocessing the datasets.
# Preprocessing is slighlty different for training and evaluation.
column_names = raw_datasets['validation'].column_names
question_column_name = 'question' if 'question' in column_names else column_names[0]
context_column_name = 'context' if 'context' in column_names else column_names[1]
answer_column_name = 'answers' if 'answers' in column_names else column_names[2]
# Padding side determines if we do (question|context) or (context|question).
pad_on_right = tokenizer.padding_side == 'right'
if args.max_seq_length > tokenizer.model_max_length:
logger.warning(
F'''The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the'''
F'''model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.'''
)
max_seq_length = min(args.max_seq_length, tokenizer.model_max_length)
def prepare_validation_features(examples):
    # Some of the questions have lots of whitespace on the left, which is not useful and will make the
    # truncation of the context fail (the tokenized question will take lots of space). So we remove that
    # left whitespace.
    examples[question_column_name] = [q.lstrip() for q in examples[question_column_name]]
    # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
    # in one example possibly giving several features when a context is long, each of those features having a
    # context that overlaps a bit the context of the previous feature.
    tokenized_examples = tokenizer(
        examples[question_column_name if pad_on_right else context_column_name],
        examples[context_column_name if pad_on_right else question_column_name],
        truncation='only_second' if pad_on_right else 'only_first',
        max_length=max_seq_length,
        stride=args.doc_stride,
        return_overflowing_tokens=True,
        return_offsets_mapping=True,
        padding='max_length',
    )
    # Since one example might give us several features if it has a long context, we need a map from a feature to
    # its corresponding example. This key gives us just that.
    sample_mapping = tokenized_examples.pop('overflow_to_sample_mapping')
    # For evaluation, we will need to convert our predictions to substrings of the context, so we keep the
    # corresponding example_id and we will store the offset mappings.
    tokenized_examples['example_id'] = []
    for i in range(len(tokenized_examples['input_ids'])):
        # Grab the sequence corresponding to that example (to know what is the context and what is the question).
        sequence_ids = tokenized_examples.sequence_ids(i)
        context_index = 1 if pad_on_right else 0
        # One example can give several spans, this is the index of the example containing this span of text.
        sample_index = sample_mapping[i]
        tokenized_examples['example_id'].append(examples['id'][sample_index])
        # Set to None the offset_mapping that are not part of the context so it's easy to determine if a token
        # position is part of the context or not.
        tokenized_examples['offset_mapping'][i] = [
            (o if sequence_ids[k] == context_index else None)
            for k, o in enumerate(tokenized_examples['offset_mapping'][i])
        ]
    return tokenized_examples
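# For intuition: with max_seq_length=384 and doc_stride=128, a long context is split
# into several overlapping features for one example, and the popped
# overflow_to_sample_mapping might look like [0, 0, 0, 1, ...], mapping each feature
# back to the example it came from.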
eval_examples = raw_datasets['validation']
# Validation Feature Creation
eval_dataset = eval_examples.map(
    prepare_validation_features,
    batched=True,
    num_proc=args.preprocessing_num_workers,
    remove_columns=column_names,
    load_from_cache_file=not args.overwrite_cache,
    desc='Running tokenizer on validation dataset',
)
data_collator = default_data_collator
eval_dataset_for_model = eval_dataset.remove_columns(['example_id', 'offset_mapping'])
eval_dataloader = DataLoader(
    eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size
)
def post_processing_function(examples, features, predictions, stage='eval'):
    # Post-processing: we match the start logits and end logits to answers in the original context.
    predictions = postprocess_qa_predictions(
        examples=examples,
        features=features,
        predictions=predictions,
        version_2_with_negative=args.version_2_with_negative,
        n_best_size=args.n_best_size,
        max_answer_length=args.max_answer_length,
        null_score_diff_threshold=args.null_score_diff_threshold,
        output_dir=args.output_dir,
        prefix=stage,
    )
    # Format the result to the format the metric expects.
    if args.version_2_with_negative:
        formatted_predictions = [
            {'id': k, 'prediction_text': v, 'no_answer_probability': 0.0} for k, v in predictions.items()
        ]
    else:
        formatted_predictions = [{'id': k, 'prediction_text': v} for k, v in predictions.items()]
    references = [{'id': ex['id'], 'answers': ex[answer_column_name]} for ex in examples]
    return EvalPrediction(predictions=formatted_predictions, label_ids=references)
metric = load_metric('squad_v2' if args.version_2_with_negative else 'squad')
# Evaluation!
logger.info('Loading ONNX model %s for evaluation', args.onnx_model_path)
with open(engine_name, 'rb') as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine(
f.read()
) as engine, engine.create_execution_context() as context:
# setup for TRT inferrence
for i in range(len(input_names)):
context.set_binding_shape(i, INPUT_SHAPE)
assert context.all_binding_shapes_specified
    def binding_nbytes(binding):
        return trt.volume(engine.get_binding_shape(binding)) * engine.get_binding_dtype(binding).itemsize

    # Allocate device memory for inputs and outputs.
    d_inputs = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)]
    # Allocate pinned host buffers for the two outputs (start and end logits).
    h_output0 = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.float32)
    h_output1 = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.float32)
    d_output0 = cuda.mem_alloc(h_output0.nbytes)
    d_output1 = cuda.mem_alloc(h_output1.nbytes)
    # Create a stream in which to copy inputs/outputs and run inference.
    stream = cuda.Stream()
# Evaluation
logger.info('***** Running Evaluation *****')
logger.info(F''' Num examples = {len(eval_dataset)}''')
logger.info(F''' Batch size = {args.per_device_eval_batch_size}''')
    total_time = 0.0
    niter = 0
    start_time = timeit.default_timer()
    all_preds = None
    for step, batch in enumerate(eval_dataloader):
        outputs, infer_time = model_infer(batch, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream)
        total_time += infer_time
        niter += 1
        start_logits, end_logits = outputs
        start_logits = torch.tensor(start_logits)
        end_logits = torch.tensor(end_logits)
        # necessary to pad predictions and labels for being gathered
        start_logits = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-100)
        end_logits = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-100)
        logits = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy())
        all_preds = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100)
    if all_preds is not None:
        all_preds = nested_truncate(all_preds, len(eval_dataset))
    evalTime = timeit.default_timer() - start_time
    logger.info(' Evaluation done in total %f secs (%f sec per example)', evalTime, evalTime / len(eval_dataset))
    # Inference time from TRT
    logger.info('Average Inference Time = {:.3f} ms'.format(total_time * 1000 / niter))
    logger.info('Total Inference Time = {:.3f} ms'.format(total_time * 1000))
    logger.info('Total Number of Inference = %d', niter)
    prediction = post_processing_function(eval_examples, eval_dataset, all_preds)
    eval_metric = metric.compute(predictions=prediction.predictions, references=prediction.label_ids)
    logger.info(f'Evaluation metrics: {eval_metric}')
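# Example invocation (a sketch; the file name, model and dataset are illustrative):
#   python evaluate_qa_trt.py --onnx_model_path ./bert-qa.onnx --output_dir ./out \
#       --tokenizer_name bert-base-uncased --dataset_name squad --fp16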
import argparse
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def rename_keys(state_dict):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if key.startswith("module.encoder"):
            key = key.replace("module.encoder", "glpn.encoder")
        if key.startswith("module.decoder"):
            key = key.replace("module.decoder", "decoder.stages")
        if "patch_embed" in key:
            # replace for example patch_embed1 by patch_embeddings.0
            idx = key[key.find("patch_embed") + len("patch_embed")]
            key = key.replace(f"patch_embed{idx}", f"patch_embeddings.{int(idx)-1}")
        if "norm" in key:
            key = key.replace("norm", "layer_norm")
        if "glpn.encoder.layer_norm" in key:
            # replace for example layer_norm1 by layer_norm.0
            idx = key[key.find("glpn.encoder.layer_norm") + len("glpn.encoder.layer_norm")]
            key = key.replace(f"layer_norm{idx}", f"layer_norm.{int(idx)-1}")
        if "layer_norm1" in key:
            key = key.replace("layer_norm1", "layer_norm_1")
        if "layer_norm2" in key:
            key = key.replace("layer_norm2", "layer_norm_2")
        if "block" in key:
            # replace for example block1 by block.0
            idx = key[key.find("block") + len("block")]
            key = key.replace(f"block{idx}", f"block.{int(idx)-1}")
        if "attn.q" in key:
            key = key.replace("attn.q", "attention.self.query")
        if "attn.proj" in key:
            key = key.replace("attn.proj", "attention.output.dense")
        if "attn" in key:
            key = key.replace("attn", "attention.self")
        if "fc1" in key:
            key = key.replace("fc1", "dense1")
        if "fc2" in key:
            key = key.replace("fc2", "dense2")
        if "linear_pred" in key:
            key = key.replace("linear_pred", "classifier")
        if "linear_fuse" in key:
            key = key.replace("linear_fuse.conv", "linear_fuse")
            key = key.replace("linear_fuse.bn", "batch_norm")
        if "linear_c" in key:
            # replace for example linear_c4 by linear_c.3
            idx = key[key.find("linear_c") + len("linear_c")]
            key = key.replace(f"linear_c{idx}", f"linear_c.{int(idx)-1}")
        if "bot_conv" in key:
            key = key.replace("bot_conv", "0.convolution")
        if "skip_conv1" in key:
            key = key.replace("skip_conv1", "1.convolution")
        if "skip_conv2" in key:
            key = key.replace("skip_conv2", "2.convolution")
        if "fusion1" in key:
            key = key.replace("fusion1", "1.fusion")
        if "fusion2" in key:
            key = key.replace("fusion2", "2.fusion")
        if "fusion3" in key:
            key = key.replace("fusion3", "3.fusion")
        if "fusion" in key and "conv" in key:
            key = key.replace("conv", "convolutional_layer")
        if key.startswith("module.last_layer_depth"):
            key = key.replace("module.last_layer_depth", "head.head")
        new_state_dict[key] = value
    return new_state_dict
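# For example, an original checkpoint key such as
#   module.encoder.block1.0.attn.q.weight
# passes through the replacements above and comes out as
#   glpn.encoder.block.0.0.attention.self.query.weight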
def read_in_k_v(state_dict, config):
    for i in range(config.num_encoder_blocks):
        for j in range(config.depths[i]):
            # read in weights + bias of keys and values (which is a single matrix in the original implementation)
            kv_weight = state_dict.pop(f"glpn.encoder.block.{i}.{j}.attention.self.kv.weight")
            kv_bias = state_dict.pop(f"glpn.encoder.block.{i}.{j}.attention.self.kv.bias")
            # next, add keys and values (in that order) to the state dict
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.key.weight"] = kv_weight[: config.hidden_sizes[i], :]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.key.bias"] = kv_bias[: config.hidden_sizes[i]]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.value.weight"] = kv_weight[config.hidden_sizes[i] :, :]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.value.bias"] = kv_bias[config.hidden_sizes[i] :]
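# The original implementation stores keys and values as one fused projection whose
# weight has shape (2 * hidden_size, hidden_size): the first hidden_size rows become
# the key projection and the remaining rows the value projection.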
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image
@torch.no_grad()
def convert_glpn_checkpoint(checkpoint_path, pytorch_dump_folder_path, push_to_hub=False, model_name=None):
    config = GLPNConfig(hidden_sizes=[64, 128, 320, 512], decoder_hidden_size=64, depths=[3, 8, 27, 3])
    # load image processor (only resize + rescale)
    image_processor = GLPNImageProcessor()
    # prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors="pt").pixel_values
    logger.info("Converting model...")
    # load original state dict
    state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))
    # rename keys
    state_dict = rename_keys(state_dict)
    # key and value matrices need special treatment
    read_in_k_v(state_dict, config)
    # create HuggingFace model and load state dict
    model = GLPNForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()
    # forward pass
    outputs = model(pixel_values)
    predicted_depth = outputs.predicted_depth
    # verify output
    if model_name is not None:
        if "nyu" in model_name:
            expected_slice = torch.tensor(
                [[4.4147, 4.0873, 4.0673], [3.7890, 3.2881, 3.1525], [3.7674, 3.5423, 3.4913]]
            )
        elif "kitti" in model_name:
            expected_slice = torch.tensor(
                [[3.4291, 2.7865, 2.5151], [3.2841, 2.7021, 2.3502], [3.1147, 2.4625, 2.2481]]
            )
        else:
            raise ValueError(f"Unknown model name: {model_name}")
        expected_shape = torch.Size([1, 480, 640])
        assert predicted_depth.shape == expected_shape
        assert torch.allclose(predicted_depth[0, :3, :3], expected_slice, atol=1e-4)
        print("Looks ok!")
    # finally, push to hub if required
    if push_to_hub:
        logger.info("Pushing model and image processor to the hub...")
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add model",
            use_temp_dir=True,
        )
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add image processor",
            use_temp_dir=True,
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_path',
default=None,
type=str,
help='Path to the original PyTorch checkpoint (.pth file).',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether to upload the model to the HuggingFace hub.'
)
parser.add_argument(
'--model_name',
default='glpn-kitti',
type=str,
help='Name of the model in case you\'re pushing to the hub.',
)
    args = parser.parse_args()
convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
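# Example invocation (a sketch; the checkpoint path is illustrative):
#   python convert_glpn_to_pytorch.py --checkpoint_path ./glpn_kitti.pth \
#       --pytorch_dump_folder_path ./glpn-kitti --model_name glpn-kitti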
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_bloom": ["BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP", "BloomConfig", "BloomOnnxConfig"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = ["BloomTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bloom"] = [
"BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST",
"BloomForCausalLM",
"BloomModel",
"BloomPreTrainedModel",
"BloomForSequenceClassification",
"BloomForTokenClassification",
"BloomForQuestionAnswering",
]
if TYPE_CHECKING:
from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bloom_fast import BloomTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bloom import (
BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
BloomForCausalLM,
BloomForQuestionAnswering,
BloomForSequenceClassification,
BloomForTokenClassification,
BloomModel,
BloomPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
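# The _LazyModule indirection above is the standard transformers pattern: heavy
# submodules are only imported on first attribute access at runtime, while the
# TYPE_CHECKING branch keeps static type checkers and IDEs aware of all the names.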
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
UpperCamelCase_ = {"configuration_mbart": ["MBART_PRETRAINED_CONFIG_ARCHIVE_MAP", "MBartConfig", "MBartOnnxConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ["MBartTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ["MBartTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mbart"] = [
"MBART_PRETRAINED_MODEL_ARCHIVE_LIST",
"MBartForCausalLM",
"MBartForConditionalGeneration",
"MBartForQuestionAnswering",
"MBartForSequenceClassification",
"MBartModel",
"MBartPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_mbart"] = [
"TFMBartForConditionalGeneration",
"TFMBartModel",
"TFMBartPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_mbart"] = [
"FlaxMBartForConditionalGeneration",
"FlaxMBartForQuestionAnswering",
"FlaxMBartForSequenceClassification",
"FlaxMBartModel",
"FlaxMBartPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mbart import MBART_PRETRAINED_CONFIG_ARCHIVE_MAP, MBartConfig, MBartOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart import MBartTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart_fast import MBartTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mbart import (
MBART_PRETRAINED_MODEL_ARCHIVE_LIST,
MBartForCausalLM,
MBartForConditionalGeneration,
MBartForQuestionAnswering,
MBartForSequenceClassification,
MBartModel,
MBartPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mbart import TFMBartForConditionalGeneration, TFMBartModel, TFMBartPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mbart import (
FlaxMBartForConditionalGeneration,
FlaxMBartForQuestionAnswering,
FlaxMBartForSequenceClassification,
FlaxMBartModel,
FlaxMBartPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
    from transformers import (
        MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
        MODEL_FOR_QUESTION_ANSWERING_MAPPING,
        MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
        MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
        LayoutLMv3Config,
        LayoutLMv3ForQuestionAnswering,
        LayoutLMv3ForSequenceClassification,
        LayoutLMv3ForTokenClassification,
        LayoutLMv3Model,
    )
    from transformers.models.layoutlmv3.modeling_layoutlmv3 import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
    from transformers import LayoutLMv3ImageProcessor
class LayoutLMv3ModelTester:
    def __init__(self, parent, batch_size=2, num_channels=3, image_size=4, patch_size=2, text_seq_length=7,
                 is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99,
                 hidden_size=36, num_hidden_layers=3, num_attention_heads=4, intermediate_size=37, hidden_act="gelu",
                 hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512,
                 type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, coordinate_size=6,
                 shape_size=6, num_labels=3, num_choices=4, scope=None, range_bbox=1000):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.text_seq_length = text_seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox
        # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
        self.text_seq_length = text_seq_length
        self.image_seq_length = (image_size // patch_size) ** 2 + 1
        self.seq_length = self.text_seq_length + self.image_seq_length
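        # For example, with the defaults above (image_size=4, patch_size=2, text_seq_length=7)
        # the model sees (4 // 2) ** 2 + 1 = 5 visual tokens, so seq_length = 7 + 5 = 12.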
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.text_seq_length], self.vocab_size)
        bbox = ids_tensor([self.batch_size, self.text_seq_length, 4], self.range_bbox)
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.text_seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.text_seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.text_seq_length], self.num_labels)
        config = LayoutLMv3Config(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range, coordinate_size=self.coordinate_size,
            shape_size=self.shape_size, input_size=self.image_size, patch_size=self.patch_size,
        )
        return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    def create_and_check_model(self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels):
        model = LayoutLMv3Model(config=config)
        model.to(torch_device)
        model.eval()
        # text + image
        result = model(input_ids, pixel_values=pixel_values)
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids
        )
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        # text only
        result = model(input_ids)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.text_seq_length, self.hidden_size))
        # image only
        result = model(pixel_values=pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.image_seq_length, self.hidden_size))
    def create_and_check_for_sequence_classification(self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels):
        config.num_labels = self.num_labels
        model = LayoutLMv3ForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask,
            token_type_ids=token_type_ids, labels=sequence_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_token_classification(self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels):
        config.num_labels = self.num_labels
        model = LayoutLMv3ForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask,
            token_type_ids=token_type_ids, labels=token_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.text_seq_length, self.num_labels))
    def create_and_check_for_question_answering(self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels):
        model = LayoutLMv3ForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask,
            token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "pixel_values": pixel_values,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_torch
class LayoutLMv3ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    test_pruning = False
    test_torchscript = False
    test_mismatched_shapes = False
    all_model_classes = (
        (
            LayoutLMv3Model,
            LayoutLMv3ForSequenceClassification,
            LayoutLMv3ForTokenClassification,
            LayoutLMv3ForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"document-question-answering": LayoutLMv3ForQuestionAnswering, "feature-extraction": LayoutLMv3Model}
        if is_torch_available()
        else {}
    )

    def is_pipeline_test_to_skip(
        self, pipeline_test_case_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        return True

    def setUp(self):
        self.model_tester = LayoutLMv3ModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LayoutLMv3Config, hidden_size=37)

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = copy.deepcopy(inputs_dict)
        if model_class in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
            inputs_dict = {
                k: v.unsqueeze(1).expand(-1, self.model_tester.num_choices, -1).contiguous()
                if isinstance(v, torch.Tensor) and v.ndim > 1
                else v
                for k, v in inputs_dict.items()
            }
        if return_labels:
            if model_class in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
                inputs_dict["labels"] = torch.ones(self.model_tester.batch_size, dtype=torch.long, device=torch_device)
            elif model_class in get_values(MODEL_FOR_QUESTION_ANSWERING_MAPPING):
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device)
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device)
            elif model_class in [
                *get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING),
            ]:
                inputs_dict["labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device)
            elif model_class in [
                *get_values(MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING),
            ]:
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length), dtype=torch.long, device=torch_device)
        return inputs_dict
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LayoutLMv3Model.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
class LayoutLMv3ModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return LayoutLMv3ImageProcessor(apply_ocr=False) if is_vision_available() else None

    @slow
    def test_inference_no_head(self):
        model = LayoutLMv3Model.from_pretrained("microsoft/layoutlmv3-base").to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="pt").pixel_values.to(torch_device)
        input_ids = torch.tensor([[1, 2]])
        bbox = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]]).unsqueeze(0)
        # forward pass
        outputs = model(
            input_ids=input_ids.to(torch_device),
            bbox=bbox.to(torch_device),
            pixel_values=pixel_values.to(torch_device),
        )
        # verify the logits
        expected_shape = torch.Size((1, 199, 768))
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)
        expected_slice = torch.tensor(
            [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional
from packaging import version
if TYPE_CHECKING:
from ... import PreTrainedTokenizer, TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import is_torch_available, logging
logger = logging.get_logger(__name__)

BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'bigscience/bloom': 'https://huggingface.co/bigscience/bloom/resolve/main/config.json',
'bigscience/bloom-560m': 'https://huggingface.co/bigscience/bloom-560m/blob/main/config.json',
'bigscience/bloom-1b1': 'https://huggingface.co/bigscience/bloom-1b1/blob/main/config.json',
'bigscience/bloom-1b7': 'https://huggingface.co/bigscience/bloom-1b7/blob/main/config.json',
'bigscience/bloom-3b': 'https://huggingface.co/bigscience/bloom-3b/blob/main/config.json',
'bigscience/bloom-7b1': 'https://huggingface.co/bigscience/bloom-7b1/blob/main/config.json',
}
class BloomConfig(PretrainedConfig):
    model_type = "bloom"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_hidden_layers": "n_layer",
        "num_attention_heads": "n_head",
    }

    def __init__(self, vocab_size=250880, hidden_size=64, n_layer=2, n_head=8, layer_norm_epsilon=1e-5,
                 initializer_range=0.02, use_cache=True, bos_token_id=1, eos_token_id=2,
                 apply_residual_connection_post_layernorm=False, hidden_dropout=0.0, attention_dropout=0.0,
                 pretraining_tp=1, slow_but_exact=False, **kwargs):
        self.vocab_size = vocab_size
        # Backward compatibility with n_embed kwarg
        n_embed = kwargs.pop("n_embed", None)
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.n_layer = n_layer
        self.n_head = n_head
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.pretraining_tp = pretraining_tp
        self.apply_residual_connection_post_layernorm = apply_residual_connection_post_layernorm
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.slow_but_exact = slow_but_exact
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
class BloomOnnxConfig(OnnxConfigWithPast):
    torch_onnx_minimum_version = version.parse("1.12")

    def __init__(self, config: PretrainedConfig, task: str = "default",
                 patching_specs: List[PatchingSpec] = None, use_past: bool = False):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            # BLOOM stores values on dynamic axis 2. For more details see: https://github.com/huggingface/transformers/pull/18344
            self.fill_with_past_key_values_(common_inputs, direction="inputs", inverted_values_shape=True)
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}
        return common_inputs

    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head

    @property
    def atol_for_validation(self) -> float:
        return 1e-3

    def generate_dummy_inputs(self, tokenizer: "PreTrainedTokenizer", batch_size: int = -1, seq_length: int = -1,
                              is_pair: bool = False, framework: Optional["TensorType"] = None) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )
        # We need to order the inputs in the way they appear in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})
        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                head_dim = self._config.hidden_size // self.num_attention_heads
                past_key_shape = (
                    batch * self.num_attention_heads,
                    head_dim,
                    past_key_values_length,
                )
                past_value_shape = (
                    batch * self.num_attention_heads,
                    past_key_values_length,
                    head_dim,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_key_shape), torch.zeros(past_value_shape)) for _ in range(self.num_layers)
                ]
        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
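# Note the asymmetric dummy shapes above: BLOOM fuses batch and heads into one leading
# dimension, storing past keys as (batch * n_head, head_dim, past_seq_len) but past
# values as (batch * n_head, past_seq_len, head_dim), which is why the key tensors are
# built with transposed trailing dimensions relative to the value tensors.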
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class TFLEDModelTester:
    config_cls = LEDConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99,
                 hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37,
                 hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=20,
                 eos_token_id=2, pad_token_id=1, bos_token_id=0, attention_window=4):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.attention_window = attention_window
        # `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
        # [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
        # returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
        # because its local attention only attends to `self.attention_window` and one before and one after
        self.key_length = self.attention_window + 2
        # because of padding, `encoder_seq_length` is different from `seq_length`. Relevant for
        # the `test_attention_outputs` and `test_hidden_states_output` tests
        self.encoder_seq_length = (
            self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
        )
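        # For example, with seq_length=7 and attention_window=4 the input is padded up
        # to the next multiple of the window: 7 + (4 - 7 % 4) % 4 = 8.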
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id, attention_window=self.attention_window, **self.config_updates,
        )
        inputs_dict = prepare_led_inputs_dict(config, input_ids, decoder_input_ids)
        global_attention_mask = tf.concat(
            [tf.zeros_like(input_ids)[:, :-1], tf.ones_like(input_ids)[:, -1:]], axis=-1,
        )
        inputs_dict["global_attention_mask"] = global_attention_mask
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFLEDModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]
        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        self.batch_size = 1
        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()
        # create hypothetical next token and extend to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)
        # append to next input_ids and attention_mask
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)
        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]
        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])
        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]
        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_no_past_slice, output_from_past_slice, rtol=1e-3)
def prepare_led_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "attention_mask": attention_mask,
        "decoder_input_ids": decoder_input_ids,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
    }
@require_tf
class TFLEDModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
    all_generative_model_classes = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFLEDForConditionalGeneration,
            "feature-extraction": TFLEDModel,
            "summarization": TFLEDForConditionalGeneration,
            "text2text-generation": TFLEDForConditionalGeneration,
            "translation": TFLEDForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFLEDModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LEDConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        inputs_dict["global_attention_mask"] = tf.zeros_like(inputs_dict["attention_mask"])
        num_global_attn_indices = 2
        inputs_dict["global_attention_mask"] = tf.where(
            tf.range(self.model_tester.seq_length)[None, :] < num_global_attn_indices,
            1,
            inputs_dict["global_attention_mask"],
        )
        config.return_dict = True
        seq_length = self.model_tester.seq_length
        encoder_seq_length = self.model_tester.encoder_seq_length

        def check_decoder_attentions_output(outputs):
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, seq_length, seq_length],
            )

        def check_encoder_attentions_output(outputs):
            attentions = [t.numpy() for t in outputs.encoder_attentions]
            global_attentions = [t.numpy() for t in outputs.encoder_global_attentions]
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertEqual(len(global_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, seq_length, seq_length],
            )
            self.assertListEqual(
                list(global_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices],
            )

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["use_cache"] = False
            config.output_hidden_states = False
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            out_len = len(outputs)
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            if self.is_encoder_decoder:
                model = model_class(config)
                outputs = model(self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(config.output_hidden_states, False)
                check_decoder_attentions_output(outputs)

            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs))
            self.assertEqual(model.config.output_hidden_states, True)
            check_encoder_attentions_output(outputs)
@unittest.skip("LED keeps using potentially symbolic tensors in conditionals and breaks tracing." )
def _lowercase (self : str ):
pass
def _lowercase (self : Union[str, Any] ):
# TODO: Head-masking not yet implement
pass
def _long_tensor(tok_lst):
    return tf.constant(tok_lst, dtype=tf.int32)


TOLERANCE = 1e-4
@slow
@require_tf
class TFLEDModelIntegrationTest(unittest.TestCase):
    def test_inference_no_head(self):
        model = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384").led
        # change to intended input here
        input_ids = _long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        decoder_input_ids = _long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        inputs_dict = prepare_led_inputs_dict(model.config, input_ids, decoder_input_ids)
        output = model(**inputs_dict)[0]
        expected_shape = (1, 1024, 768)
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = tf.convert_to_tensor(
            [[2.3050, 2.8279, 0.6531], [-1.8457, -0.1455, -3.5661], [-1.0186, 0.4586, -2.2043]],
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-3)

    def test_inference_with_head(self):
        model = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384")
        # change to intended input here
        input_ids = _long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        decoder_input_ids = _long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        inputs_dict = prepare_led_inputs_dict(model.config, input_ids, decoder_input_ids)
        output = model(**inputs_dict)[0]
        expected_shape = (1, 1024, model.config.vocab_size)
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = tf.convert_to_tensor(
            [[33.6507, 6.4572, 16.8089], [5.8739, -2.4238, 11.2902], [-3.2139, -4.3149, 4.2783]],
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-3, rtol=1e-3)
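
# Hedged worked example (not part of the original test file) of the
# sliding-window padding arithmetic used by the model tester above: LED pads
# the encoder input up to the next multiple of `attention_window`, so
# encoder_seq_length can differ from seq_length. The numbers are illustrative.
#
#     seq_length, attention_window = 7, 4
#     pad = (attention_window - seq_length % attention_window) % attention_window
#     encoder_seq_length = seq_length + pad  # 7 + 1 == 8, the next multiple of 4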
'''simple docstring'''
import os
import numpy
import onnx
def _is_equal_tensor_proto(a, b):
    """Compare two TensorProtos for equality, ignoring their names."""
    name_a = a.name
    name_b = b.name
    a.name = ""
    b.name = ""
    res = a == b
    a.name = name_a
    b.name = name_b
    return res
def _node_replace_input_with(node_proto, name, new_name):
    """Replace occurrences of `name` in a node's inputs (and its subgraphs) with `new_name`."""
    for i, input_name in enumerate(node_proto.input):
        if input_name == name:
            node_proto.input.insert(i, new_name)
            node_proto.input.pop(i + 1)
    if node_proto.op_type == "If":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
        _graph_replace_input_with(node_proto.attribute[1].g, name, new_name)
    if node_proto.op_type == "Loop":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
def _graph_replace_input_with(graph_proto, name, new_name):
    """Apply the input replacement to every node in a graph."""
    for n in graph_proto.node:
        _node_replace_input_with(n, name, new_name)
def _remove_dup_initializers_from_model(model, model_without_ext, ind_to_replace):
    """Drop duplicated initializers and rewire nodes to the surviving copy."""
    inits_with_data = list(model.graph.initializer)
    inits = list(model_without_ext.graph.initializer)
    for i, ref_i in ind_to_replace:
        assert inits_with_data[i].name == inits[i].name
        assert inits_with_data[ref_i].name == inits[ref_i].name
        assert i > ref_i

        name_i = inits[i].name
        name_ref = inits[ref_i].name

        model_without_ext.graph.initializer.remove(inits[i])
        # for n in model.graph.node:
        _graph_replace_input_with(model_without_ext.graph, name_i, name_ref)
def remove_dup_initializers(onnx_file_path):
    """Load an ONNX model, deduplicate identical initializers, and save an optimized copy."""
    model_file_folder = os.path.dirname(onnx_file_path)
    model_file_name = os.path.basename(onnx_file_path)
    model = onnx.load(os.path.join(model_file_folder, model_file_name))

    inits = list(model.graph.initializer)

    dup_set = set()
    dup_map = {}
    ind_to_replace = []

    total_reduced_size = 0

    for i in range(len(inits)):
        if i in dup_set:
            continue
        for j in range(i + 1, len(inits)):
            if j in dup_set:
                continue
            if _is_equal_tensor_proto(inits[i], inits[j]):
                dup_set.add(i)
                dup_set.add(j)

                dtype = inits[j].data_type
                mem_size = numpy.prod(inits[j].dims)
                if dtype == 1:
                    mem_size *= 4  # float32
                elif dtype == 6:
                    mem_size *= 4  # int32
                elif dtype == 7 or dtype == 11:
                    mem_size *= 8  # int64 / double
                else:
                    print("unexpected data type: ", dtype)
                total_reduced_size += mem_size

                name_i = inits[i].name
                name_j = inits[j].name
                if name_i in dup_map:
                    dup_map[name_i].append(name_j)
                else:
                    dup_map[name_i] = [name_j]
                ind_to_replace.append((j, i))

    print("total reduced size: ", total_reduced_size / 1024 / 1024 / 1024, "GB")

    ind_to_replace = sorted(ind_to_replace)
    _remove_dup_initializers_from_model(model, model, ind_to_replace)

    optimized_model_file_name = "optimized_" + model_file_name
    new_model = os.path.join(model_file_folder, optimized_model_file_name)
    onnx.save(model, new_model)
    return new_model
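
# Hedged usage sketch (not part of the original script): the entry point above
# takes a path to an .onnx file and writes an "optimized_" copy next to it.
# The file name below is hypothetical.
#
#     optimized_path = remove_dup_initializers("model.onnx")
#     print("deduplicated model written to", optimized_path)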
'''simple docstring'''
import unittest
import numpy as np
from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionInpaintPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    # FIXME: add fast tests
    pass
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionInpaintPipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options
    def test_inpainting(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png"
        )
        pipe = OnnxStableDiffusionInpaintPipeline.from_pretrained(
            "runwayml/stable-diffusion-inpainting",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A red cat sitting on a park bench"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            guidance_scale=7.5,
            num_inference_steps=10,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 255:258, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.2514, 0.3007, 0.3517, 0.1790, 0.2382, 0.3167, 0.1944, 0.2273, 0.2464])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
    def test_inpainting_k_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png"
        )
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "runwayml/stable-diffusion-inpainting", subfolder="scheduler", revision="onnx"
        )
        pipe = OnnxStableDiffusionInpaintPipeline.from_pretrained(
            "runwayml/stable-diffusion-inpainting",
            revision="onnx",
            scheduler=lms_scheduler,
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A red cat sitting on a park bench"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            guidance_scale=7.5,
            num_inference_steps=20,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 255:258, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0086, 0.0077, 0.0083, 0.0093, 0.0107, 0.0139, 0.0094, 0.0097, 0.0125])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
'''simple docstring'''
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"configuration_cpmant": ["CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP", "CpmAntConfig"],
"tokenization_cpmant": ["CpmAntTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_cpmant"] = [
"CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST",
"CpmAntForCausalLM",
"CpmAntModel",
"CpmAntPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
from .tokenization_cpmant import CpmAntTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_cpmant import (
CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
CpmAntForCausalLM,
CpmAntModel,
CpmAntPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
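
# Hedged sketch (not part of the original module) of what the lazy-module
# pattern above buys you: importing the package is cheap, and the heavy
# torch-backed symbols are only materialized on first attribute access.
# The names mirror this file; no new API is introduced.
#
#     from transformers.models.cpmant import CpmAntConfig  # no torch import yet
#     config = CpmAntConfig()                               # still cheap
#     # accessing CpmAntModel would trigger the real modeling import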
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import PNDMPipeline, PNDMScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class PNDMPipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model
    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = PNDMScheduler()

        pndm = PNDMPipeline(unet=unet, scheduler=scheduler)
        pndm.to(torch_device)
        pndm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pndm(generator=generator, num_inference_steps=20, output_type="numpy").images

        generator = torch.manual_seed(0)
        image_from_tuple = pndm(generator=generator, num_inference_steps=20, output_type="numpy", return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch
class PNDMPipelineIntegrationTests(unittest.TestCase):
    def test_inference_cifar10(self):
        model_id = "google/ddpm-cifar10-32"

        unet = UNet2DModel.from_pretrained(model_id)
        scheduler = PNDMScheduler()

        pndm = PNDMPipeline(unet=unet, scheduler=scheduler)
        pndm.to(torch_device)
        pndm.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = pndm(generator=generator, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.1564, 0.14645, 0.1406, 0.14715, 0.12425, 0.14045, 0.13115, 0.12175, 0.125])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''facebook/nllb-moe-54B''': '''https://huggingface.co/facebook/nllb-moe-54b/resolve/main/config.json''',
}
class NllbMoeConfig(PretrainedConfig):
    model_type = "nllb-moe"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
    def __init__(
        self,
        vocab_size=128112, max_position_embeddings=1024, encoder_layers=12, encoder_ffn_dim=4096,
        encoder_attention_heads=16, decoder_layers=12, decoder_ffn_dim=4096, decoder_attention_heads=16,
        encoder_layerdrop=0.05, decoder_layerdrop=0.05, use_cache=True, is_encoder_decoder=True,
        activation_function="relu", d_model=1024, dropout=0.1, attention_dropout=0.1,
        activation_dropout=0.0, init_std=0.02, decoder_start_token_id=2, scale_embedding=True,
        router_bias=False, router_dtype="float32", router_ignore_padding_tokens=False, num_experts=128,
        expert_capacity=64, encoder_sparse_step=4, decoder_sparse_step=4, router_z_loss_coef=0.001,
        router_aux_loss_coef=0.001, second_expert_policy="all", normalize_router_prob_before_dropping=False,
        batch_prioritized_routing=False, moe_eval_capacity_token_fraction=1.0, moe_token_dropout=0.2,
        pad_token_id=1, bos_token_id=0, eos_token_id=2, output_router_logits=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef
        self.decoder_sparse_step = decoder_sparse_step
        self.encoder_sparse_step = encoder_sparse_step
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(f"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}")
        self.router_dtype = router_dtype
        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.batch_prioritized_routing = batch_prioritized_routing
        self.second_expert_policy = second_expert_policy
        self.normalize_router_prob_before_dropping = normalize_router_prob_before_dropping
        self.moe_eval_capacity_token_fraction = moe_eval_capacity_token_fraction
        self.moe_token_dropout = moe_token_dropout
        self.output_router_logits = output_router_logits
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
"""simple docstring"""
import importlib
import os
import fsspec
import pytest
from fsspec import register_implementation
from fsspec.registry import _registry as _fsspec_registry
from datasets.filesystems import COMPRESSION_FILESYSTEMS, HfFileSystem, extract_path_from_uri, is_remote_filesystem
from .utils import require_lz4, require_zstandard
def test_mockfs(mockfs):
    assert "mock" in _fsspec_registry
    assert "bz2" in _fsspec_registry
def test_non_mockfs():
assert "mock" not in _fsspec_registry
assert "bz2" in _fsspec_registry
def test_extract_path_from_uri():
    mock_bucket = "mock-s3-bucket"
    dataset_path = f"s3://{mock_bucket}"
    dataset_path = extract_path_from_uri(dataset_path)
    assert dataset_path.startswith("s3://") is False

    new_dataset_path = "./local/path"
    dataset_path = extract_path_from_uri(new_dataset_path)
    assert dataset_path == new_dataset_path
def test_is_remote_filesystem(mockfs):
    is_remote = is_remote_filesystem(mockfs)
    assert is_remote is True

    fs = fsspec.filesystem("file")
    is_remote = is_remote_filesystem(fs)
    assert is_remote is False
@pytest.mark.parametrize("""compression_fs_class""" , snake_case__ )
def UpperCAmelCase__ (snake_case__ : List[Any] , snake_case__ : Optional[Any] , snake_case__ : Tuple , snake_case__ : Tuple , snake_case__ : int , snake_case__ : List[Any] , snake_case__ : Dict ):
"""simple docstring"""
_snake_case : Dict = {"""gzip""": gz_file, """xz""": xz_file, """zstd""": zstd_file, """bz2""": bza_file, """lz4""": lza_file}
_snake_case : int = input_paths[compression_fs_class.protocol]
if input_path is None:
_snake_case : List[Any] = F"for '{compression_fs_class.protocol}' compression protocol, "
if compression_fs_class.protocol == "lz4":
reason += require_lza.kwargs["reason"]
elif compression_fs_class.protocol == "zstd":
reason += require_zstandard.kwargs["reason"]
pytest.skip(snake_case__ )
_snake_case : Any = fsspec.filesystem(compression_fs_class.protocol , fo=snake_case__ )
assert isinstance(snake_case__ , snake_case__ )
_snake_case : Optional[int] = os.path.basename(snake_case__ )
_snake_case : Dict = expected_filename[: expected_filename.rindex(""".""" )]
assert fs.glob("""*""" ) == [expected_filename]
with fs.open(snake_case__ , """r""" , encoding="""utf-8""" ) as f, open(snake_case__ , encoding="""utf-8""" ) as expected_file:
assert f.read() == expected_file.read()
@pytest.mark.parametrize("""protocol""" , ["""zip""", """gzip"""] )
def UpperCAmelCase__ (snake_case__ : Any , snake_case__ : Any , snake_case__ : Union[str, Any] ):
"""simple docstring"""
_snake_case : Any = {"""zip""": zip_jsonl_path, """gzip""": jsonl_gz_path}
_snake_case : Union[str, Any] = compressed_file_paths[protocol]
_snake_case : Union[str, Any] = """dataset.jsonl"""
_snake_case : Union[str, Any] = F"{protocol}://{member_file_path}::{compressed_file_path}"
_snake_case , *_snake_case : str = fsspec.get_fs_token_paths(snake_case__ )
assert fs.isfile(snake_case__ )
assert not fs.isfile("""non_existing_""" + member_file_path )
@pytest.mark.integration
def test_hf_filesystem(hf_token, hf_api, hf_private_dataset_repo_txt_data, text_file):
    repo_info = hf_api.dataset_info(hf_private_dataset_repo_txt_data, token=hf_token)
    hffs = HfFileSystem(repo_info=repo_info, token=hf_token)
    assert sorted(hffs.glob("*")) == [".gitattributes", "data"]
    assert hffs.isdir("data")
    assert hffs.isfile(".gitattributes") and hffs.isfile("data/text_data.txt")
    with open(text_file) as f:
        assert hffs.open("data/text_data.txt", "r").read() == f.read()
def test_fs_overwrites():
    protocol = "bz2"

    # Import module
    import datasets.filesystems

    # Overwrite protocol and reload
    register_implementation(protocol, None, clobber=True)
    with pytest.warns(UserWarning) as warning_info:
        importlib.reload(datasets.filesystems)

    assert len(warning_info) == 1
    assert (
        str(warning_info[0].message)
        == f"A filesystem protocol was already set for {protocol} and will be overwritten."
    )
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
'tokenizer_file': {
'bigscience/tokenizer': 'https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json',
'bigscience/bloom-560m': 'https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json',
'bigscience/bloom-1b1': 'https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json',
'bigscience/bloom-1b7': 'https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json',
'bigscience/bloom-3b': 'https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json',
'bigscience/bloom-7b1': 'https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json',
'bigscience/bloom': 'https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json',
},
}
class BloomTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = None

    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, unk_token="<unk>", bos_token="<s>", eos_token="</s>", pad_token="<pad>", add_prefix_space=False, clean_up_tokenization_spaces=False, **kwargs):
        super().__init__(
            vocab_file, merges_file, tokenizer_file=tokenizer_file, unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, add_prefix_space=add_prefix_space, clean_up_tokenization_spaces=clean_up_tokenization_spaces, **kwargs,
        )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space
    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"
                " pretokenized inputs."
            )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"
                " pretokenized inputs."
            )
        return super()._encode_plus(*args, **kwargs)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])

        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
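
# Hedged usage sketch (not part of the original module) for the conversation
# helper above: each turn is encoded without special tokens and terminated
# with EOS, then the history is clipped to the last model_max_length ids.
# The checkpoint name comes from the pretrained map above.
#
#     tok = BloomTokenizerFast.from_pretrained("bigscience/bloom-560m")
#     ids = tok.encode("Hello there", add_special_tokens=False) + [tok.eos_token_id]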
"""simple docstring"""
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
__A : str = abspath(join(dirname(__file__), "src"))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)
def pytest_configure(config):
config.addinivalue_line(
'''markers''' , '''is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested''' )
config.addinivalue_line(
'''markers''' , '''is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested''' )
config.addinivalue_line('''markers''' , '''is_pipeline_test: mark test to run only when pipelines are tested''' )
config.addinivalue_line('''markers''' , '''is_staging_test: mark test to run only in the staging environment''' )
config.addinivalue_line('''markers''' , '''accelerate_tests: mark test that require accelerate''' )
config.addinivalue_line('''markers''' , '''tool_tests: mark the tool tests that are run on their specific schedule''' )
def pytest_addoption(parser):
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)
def pytest_terminal_summary(terminalreporter):
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
def pytest_sessionfinish(session, exitstatus):
    # If no tests are collected, pytest exits with code 5, which makes the CI fail.
    if exitstatus == 5:
        session.exitstatus = 0
# Doctest custom flag to ignore output.
__A : List[Any] = doctest.register_optionflag("IGNORE_RESULT")
__A : Any = doctest.OutputChecker
class CustomOutputChecker(OutputChecker):
    def check_output(self, want, got, optionflags):
        if IGNORE_RESULT & optionflags:
            return True
        return OutputChecker.check_output(self, want, got, optionflags)
doctest.OutputChecker = CustomOutputChecker
_pytest.doctest.DoctestModule = HfDoctestModule
doctest.DocTestParser = HfDocTestParser
"""simple docstring"""
import importlib
import inspect
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
__A : Union[str, Any] = "src/transformers"
# This is to make sure the transformers module imported is the one in the repo.
spec = importlib.util.spec_from_file_location(
    "transformers",
    os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
    submodule_search_locations=[PATH_TO_TRANSFORMERS],
)
transformers = spec.loader.load_module()

CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING

# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
_re_checkpoint = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")
CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = {
"CLIPConfigMixin",
"DecisionTransformerConfigMixin",
"EncoderDecoderConfigMixin",
"RagConfigMixin",
"SpeechEncoderDecoderConfigMixin",
"VisionEncoderDecoderConfigMixin",
"VisionTextDualEncoderConfigMixin",
}
def check_config_docstrings_have_checkpoints():
    configs_without_checkpoint = []

    for config_class in list(CONFIG_MAPPING.values()):
        checkpoint_found = False

        # source code of `config_class`
        config_source = inspect.getsource(config_class)
        checkpoints = _re_checkpoint.findall(config_source)

        for checkpoint in checkpoints:
            # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
            # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
            ckpt_name, ckpt_link = checkpoint

            # verify the checkpoint name corresponds to the checkpoint link
            ckpt_link_from_name = f"https://huggingface.co/{ckpt_name}"
            if ckpt_link == ckpt_link_from_name:
                checkpoint_found = True
                break

        name = config_class.__name__
        if not checkpoint_found and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name)

    if len(configs_without_checkpoint) > 0:
        message = "\n".join(sorted(configs_without_checkpoint))
        raise ValueError(f"The following configurations don't contain any valid checkpoint:\n{message}")
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
"""simple docstring"""
import os
import random
import sys
from . import cryptomath_module as cryptoMath # noqa: N812
from . import rabin_miller as rabinMiller # noqa: N812
def main():
    print("Making key files...")
    make_key_files("rsa", 1024)
    print("Key files generation successful.")
def generate_key(key_size):
    print("Generating prime p...")
    p = rabinMiller.generate_large_prime(key_size)
    print("Generating prime q...")
    q = rabinMiller.generate_large_prime(key_size)
    n = p * q

    print("Generating e that is relatively prime to (p - 1) * (q - 1)...")
    while True:
        e = random.randrange(2 ** (key_size - 1), 2 ** (key_size))
        if cryptoMath.gcd(e, (p - 1) * (q - 1)) == 1:
            break

    print("Calculating d that is mod inverse of e...")
    d = cryptoMath.find_mod_inverse(e, (p - 1) * (q - 1))

    public_key = (n, e)
    private_key = (n, d)
    return (public_key, private_key)
def make_key_files(name, key_size):
    if os.path.exists(f"{name}_pubkey.txt") or os.path.exists(f"{name}_privkey.txt"):
        print("\nWARNING:")
        print(
            f'"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n'
            "Use a different name or delete these files and re-run this program."
        )
        sys.exit()

    public_key, private_key = generate_key(key_size)
print(f'\nWriting public key to file {name}_pubkey.txt...' )
with open(f'{name}_pubkey.txt' , '''w''' ) as out_file:
out_file.write(f'{key_size},{public_key[0]},{public_key[1]}' )
print(f'Writing private key to file {name}_privkey.txt...' )
with open(f'{name}_privkey.txt' , '''w''' ) as out_file:
out_file.write(f'{key_size},{private_key[0]},{private_key[1]}' )
if __name__ == "__main__":
main()
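
# Hedged worked example of the RSA arithmetic above with toy numbers (never
# use sizes this small in practice): p = 61, q = 53, so n = 3233 and
# (p - 1) * (q - 1) = 3120. Picking e = 17 (gcd(17, 3120) == 1) gives
# d = 2753, since 17 * 2753 % 3120 == 1. Then for a message m = 65:
#
#     c = pow(65, 17, 3233)       # encrypt -> 2790
#     m = pow(2790, 2753, 3233)   # decrypt -> 65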
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFGPT2LMHeadModel, is_keras_nlp_available, is_tf_available
from transformers.models.gpt2.tokenization_gpt2 import GPT2Tokenizer
from transformers.testing_utils import require_keras_nlp, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_keras_nlp_available():
    from transformers.models.gpt2 import TFGPT2Tokenizer

TOKENIZER_CHECKPOINTS = ["gpt2"]
TINY_MODEL_CHECKPOINT = "gpt2"
if is_tf_available():
    class ModelToSave(tf.Module):
        def __init__(self, tokenizer):
            super().__init__()
            self.tokenizer = tokenizer
            config = AutoConfig.from_pretrained(TINY_MODEL_CHECKPOINT)
            self.model = TFGPT2LMHeadModel.from_config(config)

        @tf.function(input_signature=(tf.TensorSpec((None,), tf.string, name="text"),))
        def serving(self, text):
            tokenized = self.tokenizer(text)
            input_ids_dense = tokenized["input_ids"].to_tensor()
            input_mask = tf.cast(input_ids_dense > 0, tf.int32)
            # input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN])
            outputs = self.model(input_ids=input_ids_dense, attention_mask=input_mask)["logits"]
            return outputs
@require_tf
@require_keras_nlp
class GPTTokenizationTest(unittest.TestCase):
    def setUp(self):
        super().setUp()
        self.tokenizers = [GPT2Tokenizer.from_pretrained(checkpoint) for checkpoint in TOKENIZER_CHECKPOINTS]
        self.tf_tokenizers = [TFGPT2Tokenizer.from_pretrained(checkpoint) for checkpoint in TOKENIZER_CHECKPOINTS]
        assert len(self.tokenizers) == len(self.tf_tokenizers)

        self.test_sentences = [
'''This is a straightforward English test sentence.''',
'''This one has some weird characters\rto\nsee\r\nif those\u00E9break things.''',
'''Now we\'re going to add some Chinese: 一 二 三 一二三''',
'''And some much more rare Chinese: 齉 堃 齉堃''',
'''Je vais aussi écrire en français pour tester les accents''',
'''Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ''',
]
        ]
        self.paired_sentences = list(zip(self.test_sentences, self.test_sentences[::-1]))
    def test_output_equivalence(self):
        for tokenizer, tf_tokenizer in zip(self.tokenizers, self.tf_tokenizers):
            for test_inputs in self.test_sentences:
                python_outputs = tokenizer([test_inputs], return_tensors="tf")
                tf_outputs = tf_tokenizer([test_inputs])
                for key in python_outputs.keys():
                    # convert them to numpy to avoid messing with ragged tensors
                    python_outputs_values = python_outputs[key].numpy()
                    tf_outputs_values = tf_outputs[key].numpy()
                    self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape))
                    self.assertTrue(tf.reduce_all(tf.cast(python_outputs_values, tf.int64) == tf_outputs_values))
    @slow
    def test_graph_mode(self):
        for tf_tokenizer in self.tf_tokenizers:
            compiled_tokenizer = tf.function(tf_tokenizer)
            for test_inputs in self.test_sentences:
                test_inputs = tf.constant(test_inputs)
                compiled_outputs = compiled_tokenizer(test_inputs)
                eager_outputs = tf_tokenizer(test_inputs)
                for key in eager_outputs.keys():
                    self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key]))
    @slow
    def test_saved_model(self):
        for tf_tokenizer in self.tf_tokenizers:
            model = ModelToSave(tokenizer=tf_tokenizer)
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
            out = model.serving(test_inputs)  # Build model with some sample inputs
            with TemporaryDirectory() as tempdir:
                save_path = Path(tempdir) / "saved.model"
                tf.saved_model.save(model, save_path, signatures={"serving_default": model.serving})
                loaded_model = tf.saved_model.load(save_path)
                loaded_output = loaded_model.signatures["serving_default"](test_inputs)["output_0"]
            # We may see small differences because the loaded model is compiled, so we need an epsilon for the test
            self.assertTrue(tf.reduce_all(out == loaded_output))
    @slow
    def test_from_config(self):
        for tf_tokenizer in self.tf_tokenizers:
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
            out = tf_tokenizer(test_inputs)  # Build model with some sample inputs
            config = tf_tokenizer.get_config()
            model_from_config = TFGPT2Tokenizer.from_config(config)
            from_config_output = model_from_config(test_inputs)
            for key in from_config_output.keys():
                self.assertTrue(tf.reduce_all(from_config_output[key] == out[key]))
    @slow
    def test_padding(self):
        for tf_tokenizer in self.tf_tokenizers:
            # for the test to run
            tf_tokenizer.pad_token_id = 123123
            for max_length in [3, 5, 1024]:
                test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
                out = tf_tokenizer(test_inputs, max_length=max_length)
                out_length = out["input_ids"].numpy().shape[1]
                assert out_length == max_length
import numpy
class TwoHiddenLayerNeuralNetwork:
    def __init__(self, input_array: numpy.ndarray, output_array: numpy.ndarray) -> None:
        self.input_array = input_array
# Random initial weights are assigned where first argument is the
# number of nodes in previous layer and second argument is the
# number of nodes in the next layer.
# Random initial weights are assigned.
# self.input_array.shape[1] is used to represent number of nodes in input layer.
# First hidden layer consists of 4 nodes.
        self.input_layer_and_first_hidden_layer_weights = numpy.random.rand(
            self.input_array.shape[1], 4
        )
# Random initial values for the first hidden layer.
# First hidden layer has 4 nodes.
# Second hidden layer has 3 nodes.
        self.first_hidden_layer_and_second_hidden_layer_weights = numpy.random.rand(4, 3)
# Random initial values for the second hidden layer.
# Second hidden layer has 3 nodes.
# Output layer has 1 node.
        self.second_hidden_layer_and_output_layer_weights = numpy.random.rand(3, 1)

        # Real output values provided.
        self.output_array = output_array

        # Predicted output values by the neural network.
        # Predicted_output array initially consists of zeroes.
        self.predicted_output = numpy.zeros(output_array.shape)
    def feedforward(self) -> numpy.ndarray:
        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.input_array, self.input_layer_and_first_hidden_layer_weights)
        )

        # layer_between_first_hidden_layer_and_second_hidden_layer is the layer
        # connecting the first hidden set of nodes with the second hidden set of nodes.
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer,
                self.first_hidden_layer_and_second_hidden_layer_weights,
            )
        )

        # layer_between_second_hidden_layer_and_output is the layer connecting
        # second hidden layer with the output node.
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer,
                self.second_hidden_layer_and_output_layer_weights,
            )
        )

        return self.layer_between_second_hidden_layer_and_output
    def back_propagation(self) -> None:
        updated_second_hidden_layer_and_output_layer_weights = numpy.dot(
            self.layer_between_first_hidden_layer_and_second_hidden_layer.T,
            2 * (self.output_array - self.predicted_output) * sigmoid_derivative(self.predicted_output),
        )
        updated_first_hidden_layer_and_second_hidden_layer_weights = numpy.dot(
            self.layer_between_input_and_first_hidden_layer.T,
            numpy.dot(
                2 * (self.output_array - self.predicted_output) * sigmoid_derivative(self.predicted_output),
                self.second_hidden_layer_and_output_layer_weights.T,
            )
            * sigmoid_derivative(self.layer_between_first_hidden_layer_and_second_hidden_layer),
        )
        updated_input_layer_and_first_hidden_layer_weights = numpy.dot(
            self.input_array.T,
            numpy.dot(
                numpy.dot(
                    2 * (self.output_array - self.predicted_output) * sigmoid_derivative(self.predicted_output),
                    self.second_hidden_layer_and_output_layer_weights.T,
                )
                * sigmoid_derivative(self.layer_between_first_hidden_layer_and_second_hidden_layer),
                self.first_hidden_layer_and_second_hidden_layer_weights.T,
            )
            * sigmoid_derivative(self.layer_between_input_and_first_hidden_layer),
        )

        self.input_layer_and_first_hidden_layer_weights += (
            updated_input_layer_and_first_hidden_layer_weights
        )
        self.first_hidden_layer_and_second_hidden_layer_weights += (
            updated_first_hidden_layer_and_second_hidden_layer_weights
        )
        self.second_hidden_layer_and_output_layer_weights += (
            updated_second_hidden_layer_and_output_layer_weights
        )
    def train(self, output: numpy.ndarray, iterations: int, give_loss: bool) -> None:
        for iteration in range(1, iterations + 1):
            self.predicted_output = self.feedforward()
            self.back_propagation()
            if give_loss:
                loss = numpy.mean(numpy.square(output - self.feedforward()))
                print(f"""Iteration {iteration} Loss: {loss}""")
    def predict(self, input_arr: numpy.ndarray) -> int:
        # Input values for which the prediction is to be made.
        self.array = input_arr
        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.array, self.input_layer_and_first_hidden_layer_weights)
        )
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer,
                self.first_hidden_layer_and_second_hidden_layer_weights,
            )
        )
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer,
                self.second_hidden_layer_and_output_layer_weights,
            )
        )
        return int(self.layer_between_second_hidden_layer_and_output > 0.6)
def sigmoid(value: numpy.ndarray) -> numpy.ndarray:
    """Applies the sigmoid function element-wise."""
    return 1 / (1 + numpy.exp(-value))
def sigmoid_derivative(value: numpy.ndarray) -> numpy.ndarray:
    """Derivative of sigmoid, given the sigmoid output `value`."""
    return (value) * (1 - (value))
def example() -> int:
    # Input values.
    test_input = numpy.array(
        (
            [0, 0, 0],
            [0, 0, 1],
            [0, 1, 0],
            [0, 1, 1],
            [1, 0, 0],
            [1, 0, 1],
            [1, 1, 0],
            [1, 1, 1],
        ),
        dtype=numpy.float64,
    )

    # True output values for the given input values.
    output = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]), dtype=numpy.float64)

    # Calling neural network class.
    neural_network = TwoHiddenLayerNeuralNetwork(input_array=test_input, output_array=output)

    # Calling training function.
    # Set give_loss to True if you want to see loss in every iteration.
    neural_network.train(output=output, iterations=10, give_loss=False)
    return neural_network.predict(numpy.array(([1, 1, 1]), dtype=numpy.float64))
if __name__ == "__main__":
example()
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_swinv2': ['SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Swinv2Config'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_swinv2"] = [
'SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST',
'Swinv2ForImageClassification',
'Swinv2ForMaskedImageModeling',
'Swinv2Model',
'Swinv2PreTrainedModel',
]
if TYPE_CHECKING:
    from .configuration_swinv2 import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, Swinv2Config
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_swinv2 import (
            SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Swinv2ForImageClassification,
            Swinv2ForMaskedImageModeling,
            Swinv2Model,
            Swinv2PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
'''simple docstring'''
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
PATTERNS = [
# replace left string with right string to get the relevant state_dict key (identical state dict to bart)
['''memory_attention''', '''encoder_attn'''],
['''attention''', '''attn'''],
['''/''', '''.'''],
['''.LayerNorm.gamma''', '''_layer_norm.weight'''],
['''.LayerNorm.beta''', '''_layer_norm.bias'''],
['''r.layer_''', '''r.layers.'''],
['''output_proj''', '''out_proj'''],
['''ffn.dense_1.''', '''fc2.'''],
['''ffn.dense.''', '''fc1.'''],
['''ffn_layer_norm''', '''final_layer_norm'''],
['''kernel''', '''weight'''],
['''encoder_layer_norm.''', '''encoder.layer_norm.'''],
['''decoder_layer_norm.''', '''decoder.layer_norm.'''],
['''embeddings.weights''', '''shared.weight'''],
]
def rename_state_dict_key(k: str) -> str:
    for pegasus_name, hf_name in PATTERNS:
        k = k.replace(pegasus_name, hf_name)
    return k
def convert_pegasus(tf_weights: dict, cfg_updates: dict) -> PegasusForConditionalGeneration:
    cfg_kwargs = DEFAULTS.copy()
    cfg_kwargs.update(cfg_updates)
    cfg = PegasusConfig(**cfg_kwargs)
    torch_model = PegasusForConditionalGeneration(cfg)
    sd = torch_model.model.state_dict()
    mapping = {}
    for k, v in tf_weights.items():
        new_k = rename_state_dict_key(k)
        if new_k not in sd:
            raise ValueError(f"""could not find new key {new_k} in state dict. (converted from {k})""")

        if "dense" in k or "proj" in new_k:
            v = v.T
        mapping[new_k] = torch.tensor(v, dtype=sd[new_k].dtype)
        assert v.shape == sd[new_k].shape, f"""{new_k}, {k}, {v.shape}, {sd[new_k].shape}"""
    # make sure embedding.padding_idx is respected
    mapping["shared.weight"][cfg.pad_token_id] = torch.zeros_like(mapping["shared.weight"][cfg.pad_token_id + 1])
    mapping["encoder.embed_tokens.weight"] = mapping["shared.weight"]
    mapping["decoder.embed_tokens.weight"] = mapping["shared.weight"]
    empty_biases = {k: torch.zeros_like(v) for k, v in sd.items() if k.endswith("bias") and k not in mapping}
    mapping.update(**empty_biases)
    missing, extra = torch_model.model.load_state_dict(mapping, strict=False)
    unexpected_missing = [
        k for k in missing if k not in ["encoder.embed_positions.weight", "decoder.embed_positions.weight"]
    ]
    assert unexpected_missing == [], f"""no matches found for the following torch keys {unexpected_missing}"""
    assert extra == [], f"""no matches found for the following tf keys {extra}"""
    return torch_model
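
# Hedged note (not part of the original script) on the transpose above: TF
# dense kernels are stored as (in_features, out_features) while
# torch.nn.Linear weights are (out_features, in_features), so dense/proj
# weights must be transposed before they can be copied into the PyTorch state
# dict. Minimal illustration with a hypothetical shape:
#
#     import numpy as np
#     tf_kernel = np.zeros((1024, 4096))       # (in, out)
#     torch_weight_shape = tf_kernel.T.shape   # (4096, 1024) == (out, in)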
def lowercase_ ( _lowercase="./ckpt/aeslc/model.ckpt-32000" ) -> Dict:
'''simple docstring'''
lowerCamelCase_ : Tuple = tf.train.list_variables(_lowercase )
lowerCamelCase_ : Dict = {}
lowerCamelCase_ : int = ['''Adafactor''', '''global_step''']
for name, shape in tqdm(_lowercase , desc='''converting tf checkpoint to dict''' ):
lowerCamelCase_ : List[str] = any(pat in name for pat in ignore_name )
if skip_key:
continue
lowerCamelCase_ : List[str] = tf.train.load_variable(_lowercase , _lowercase )
lowerCamelCase_ : Optional[int] = array
return tf_weights
def convert_pegasus_ckpt_to_pytorch(ckpt_path, save_dir):
    # save tokenizer first
    dataset = Path(ckpt_path).parent.name
    desired_max_model_length = task_specific_params[f"""summarization_{dataset}"""]["max_position_embeddings"]
    tok = PegasusTokenizer.from_pretrained("sshleifer/pegasus", model_max_length=desired_max_model_length)
    assert tok.model_max_length == desired_max_model_length
    tok.save_pretrained(save_dir)
    # convert model
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    cfg_updates = task_specific_params[f"""summarization_{dataset}"""]
    if dataset == "large":
        cfg_updates["task_specific_params"] = task_specific_params
    torch_model = convert_pegasus(tf_weights, cfg_updates)
    torch_model.save_pretrained(save_dir)
    sd = torch_model.state_dict()
    sd.pop("model.decoder.embed_positions.weight")
    sd.pop("model.encoder.embed_positions.weight")
    torch.save(sd, Path(save_dir) / "pytorch_model.bin")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("tf_ckpt_path", type=str, help="passed to tf.train.list_variables")
    parser.add_argument("save_dir", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    if args.save_dir is None:
        dataset = Path(args.tf_ckpt_path).parent.name
        args.save_dir = os.path.join("pegasus", dataset)
    convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
'''simple docstring'''
import copy
import tempfile
import unittest
from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError
from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
class GenerationConfigTest(unittest.TestCase):
    @parameterized.expand([(None,), ("foo.json",)])
    def test_save_load_config(self, config_name):
        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
            bad_words_ids=[[1, 2, 3], [4, 5]],
        )
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir, config_name=config_name)
            loaded_config = GenerationConfig.from_pretrained(tmp_dir, config_name=config_name)

        # Checks parameters that were specified
        self.assertEqual(loaded_config.do_sample, True)
        self.assertEqual(loaded_config.temperature, 0.7)
        self.assertEqual(loaded_config.length_penalty, 1.0)
        self.assertEqual(loaded_config.bad_words_ids, [[1, 2, 3], [4, 5]])

        # Checks parameters that were not specified (defaults)
        self.assertEqual(loaded_config.top_k, 50)
        self.assertEqual(loaded_config.max_length, 20)
        self.assertEqual(loaded_config.max_time, None)
    def test_from_model_config(self):
        model_config = AutoConfig.from_pretrained("gpt2")
        generation_config_from_model = GenerationConfig.from_model_config(model_config)
        default_generation_config = GenerationConfig()

        # The generation config has loaded a few non-default parameters from the model config
        self.assertNotEqual(generation_config_from_model, default_generation_config)

        # One of those parameters is eos_token_id -- check if it matches
        self.assertNotEqual(generation_config_from_model.eos_token_id, default_generation_config.eos_token_id)
        self.assertEqual(generation_config_from_model.eos_token_id, model_config.eos_token_id)
    def test_update(self):
        generation_config = GenerationConfig()
        update_kwargs = {
            "max_new_tokens": 1024,
            "foo": "bar",
        }
        update_kwargs_copy = copy.deepcopy(update_kwargs)
        unused_kwargs = generation_config.update(**update_kwargs)

        # update_kwargs was not modified (no side effects)
        self.assertEqual(update_kwargs, update_kwargs_copy)

        # update_kwargs was used to update the config on valid attributes
        self.assertEqual(generation_config.max_new_tokens, 1024)

        # `.update()` returns a dictionary of unused kwargs
        self.assertEqual(unused_kwargs, {"foo": "bar"})
    def test_initialize_new_kwargs(self):
        generation_config = GenerationConfig()
        generation_config.foo = "bar"

        with tempfile.TemporaryDirectory("test-generation-config") as tmp_dir:
            generation_config.save_pretrained(tmp_dir)
            new_config = GenerationConfig.from_pretrained(tmp_dir)

        # update_kwargs was used to update the config on valid attributes
        self.assertEqual(new_config.foo, "bar")

        generation_config = GenerationConfig.from_model_config(new_config)
        assert not hasattr(generation_config, "foo")  # no new kwargs should be initialized if from config
    def test_kwarg_init(self):
        default_config = GenerationConfig()
        self.assertEqual(default_config.temperature, 1.0)
        self.assertEqual(default_config.do_sample, False)
        self.assertEqual(default_config.num_beams, 1)

        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
            bad_words_ids=[[1, 2, 3], [4, 5]],
        )
        self.assertEqual(config.temperature, 0.7)
        self.assertEqual(config.do_sample, True)
        self.assertEqual(config.num_beams, 1)

        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir)
            loaded_config = GenerationConfig.from_pretrained(tmp_dir, temperature=1.0)

        self.assertEqual(loaded_config.temperature, 1.0)
        self.assertEqual(loaded_config.do_sample, True)
        self.assertEqual(loaded_config.num_beams, 1)  # default value
@is_staging_test
class ConfigPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-generation-config")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-generation-config-org")
        except HTTPError:
            pass
    def test_push_to_hub(self):
        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
        )
        config.push_to_hub("test-generation-config", use_auth_token=self._token)

        new_config = GenerationConfig.from_pretrained(f"""{USER}/test-generation-config""")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="test-generation-config")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id="test-generation-config", push_to_hub=True, use_auth_token=self._token
            )

        new_config = GenerationConfig.from_pretrained(f"""{USER}/test-generation-config""")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
    def test_push_to_hub_in_organization(self):
        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
        )
        config.push_to_hub("valid_org/test-generation-config-org", use_auth_token=self._token)

        new_config = GenerationConfig.from_pretrained("valid_org/test-generation-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-generation-config-org")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id="valid_org/test-generation-config-org", push_to_hub=True, use_auth_token=self._token
            )

        new_config = GenerationConfig.from_pretrained("valid_org/test-generation-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
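# Illustrative round trip outside the test harness (a sketch using only the
# public `GenerationConfig` API exercised above; no Hub access required):
if __name__ == "__main__":
    sketch_config = GenerationConfig(do_sample=True, temperature=0.7)
    with tempfile.TemporaryDirectory() as tmp_dir:
        sketch_config.save_pretrained(tmp_dir)
        reloaded = GenerationConfig.from_pretrained(tmp_dir)
    assert reloaded.temperature == 0.7 and reloaded.do_sample is True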
| 318
| 1
|
from typing import List, Optional, Union
import numpy as np
import torch
import torchaudio.compliance.kaldi as ta_kaldi
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
logger = logging.get_logger(__name__)
class Speech2TextFeatureExtractor(SequenceFeatureExtractor):
    r"""Constructs a Speech2Text feature extractor that computes filter-bank features from raw speech."""

    model_input_names = ["input_features", "attention_mask"]

    def __init__(
        self,
        feature_size=80,
        sampling_rate=16000,
        num_mel_bins=80,
        padding_value=0.0,
        do_ceptral_normalize=True,
        normalize_means=True,
        normalize_vars=True,
        **kwargs,
    ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.num_mel_bins = num_mel_bins
        self.do_ceptral_normalize = do_ceptral_normalize
        self.normalize_means = normalize_means
        self.normalize_vars = normalize_vars
        self.return_attention_mask = True
    def _extract_fbank_features(self, waveform) -> np.ndarray:
        waveform = waveform * (2**15)  # Kaldi compliance: 16-bit signed integers
        waveform = torch.from_numpy(waveform).unsqueeze(0)
        features = ta_kaldi.fbank(waveform, num_mel_bins=self.num_mel_bins, sample_frequency=self.sampling_rate)
        return features.numpy()
    @staticmethod
    def utterance_cmvn(x, input_length, normalize_means=True, normalize_vars=True, padding_value=0.0) -> np.ndarray:
        # make sure we normalize float32 arrays
        if normalize_means:
            mean = x[:input_length].mean(axis=0)
            x = np.subtract(x, mean)
        if normalize_vars:
            std = x[:input_length].std(axis=0)
            x = np.divide(x, std)

        if input_length < x.shape[0]:
            x[input_length:] = padding_value

        # make sure array is in float32
        x = x.astype(np.float32)
        return x
    def normalize(self, input_features, attention_mask=None) -> List[np.ndarray]:
        lengths = attention_mask.sum(-1) if attention_mask is not None else [x.shape[0] for x in input_features]
        return [
            self.utterance_cmvn(x, n, self.normalize_means, self.normalize_vars, self.padding_value)
            for x, n in zip(input_features, lengths)
        ]
    def __call__(
        self,
        raw_speech,
        padding=False,
        max_length=None,
        truncation=False,
        pad_to_multiple_of=None,
        return_tensors=None,
        sampling_rate=None,
        return_attention_mask=None,
        **kwargs,
    ) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float32) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_speech = [raw_speech]

        # extract fbank features
        features = [self._extract_fbank_features(waveform) for waveform in raw_speech]

        # convert into correct format for padding
        encoded_inputs = BatchFeature({"input_features": features})

        padded_inputs = self.pad(
            encoded_inputs,
            padding=padding,
            max_length=max_length,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )

        # make sure list is in array format
        input_features = padded_inputs.get("input_features")
        if isinstance(input_features[0], list):
            padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32) for feature in input_features]

        attention_mask = padded_inputs.get("attention_mask")
        if attention_mask is not None:
            padded_inputs["attention_mask"] = [np.asarray(array, dtype=np.int32) for array in attention_mask]

        # Utterance-level cepstral mean and variance normalization
        if self.do_ceptral_normalize:
            attention_mask = (
                np.array(attention_mask, dtype=np.int32)
                if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
                else None
            )
            padded_inputs["input_features"] = self.normalize(
                padded_inputs["input_features"], attention_mask=attention_mask
            )

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs
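# Illustrative usage sketch (not part of the original module): extract padded,
# normalized fbank features for one second of random audio at 16 kHz.
if __name__ == "__main__":
    dummy_waveform = np.random.randn(16000).astype(np.float32)
    extractor = Speech2TextFeatureExtractor()
    encoded = extractor(dummy_waveform, sampling_rate=16000, padding=True, return_tensors="np")
    print(encoded["input_features"].shape)  # (1, num_frames, 80)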
| 145
|
def ugly_numbers(n: int) -> int:
    """Returns the nth ugly number (a number whose only prime factors are 2, 3 and 5)."""
    ugly_nums = [1]

    i2, i3, i5 = 0, 0, 0
    next_2 = ugly_nums[i2] * 2
    next_3 = ugly_nums[i3] * 3
    next_5 = ugly_nums[i5] * 5

    for _ in range(1, n):
        next_num = min(next_2, next_3, next_5)
        ugly_nums.append(next_num)
        if next_num == next_2:
            i2 += 1
            next_2 = ugly_nums[i2] * 2
        if next_num == next_3:
            i3 += 1
            next_3 = ugly_nums[i3] * 3
        if next_num == next_5:
            i5 += 1
            next_5 = ugly_nums[i5] * 5
    return ugly_nums[-1]
if __name__ == "__main__":
from doctest import testmod
testmod(verbose=True)
print(F'{ugly_numbers(200) = }')
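# Sanity note (illustrative): the ugly numbers begin 1, 2, 3, 4, 5, 6, 8, 9, 10, 12,
# so ugly_numbers(10) == 12; the three pointers advance independently, which is
# why the index collisions fixed above broke the merge.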
| 145
| 1
|
from typing import Callable, Optional
from .. import Features
from ..packaged_modules.generator.generator import Generator
from .abc import AbstractDatasetInputStream
class GeneratorDatasetInputStream(AbstractDatasetInputStream):
    def __init__(
        self,
        generator: Callable,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        gen_kwargs: Optional[dict] = None,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        self.builder = Generator(
            cache_dir=cache_dir,
            features=features,
            generator=generator,
            gen_kwargs=gen_kwargs,
            **kwargs,
        )

    def read(self):
        # Build iterable (streaming) dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split="train")
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None

            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split="train", verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset
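# Illustrative usage (a sketch; in practice this reader backs
# `datasets.Dataset.from_generator`, and the class name above is the one used
# in the fix, so treat it as an assumption):
#   def gen():
#       for i in range(3):
#           yield {"id": i}
#   ds = GeneratorDatasetInputStream(generator=gen).read()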
| 275
|
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
if is_datasets_available():
from datasets import load_dataset
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a nested list of random floats with the given (batch, length) shape."""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)
    return values
class UpperCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : List[Any] ,lowerCamelCase__ : str ,lowerCamelCase__ : Optional[int]=7 ,lowerCamelCase__ : Optional[Any]=400 ,lowerCamelCase__ : List[str]=2000 ,lowerCamelCase__ : List[str]=2048 ,lowerCamelCase__ : Any=128 ,lowerCamelCase__ : List[str]=1 ,lowerCamelCase__ : str=512 ,lowerCamelCase__ : Optional[Any]=30 ,lowerCamelCase__ : Tuple=44100 ,) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE = parent
SCREAMING_SNAKE_CASE = batch_size
SCREAMING_SNAKE_CASE = min_seq_length
SCREAMING_SNAKE_CASE = max_seq_length
SCREAMING_SNAKE_CASE = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
SCREAMING_SNAKE_CASE = spectrogram_length
SCREAMING_SNAKE_CASE = feature_size
SCREAMING_SNAKE_CASE = num_audio_channels
SCREAMING_SNAKE_CASE = hop_length
SCREAMING_SNAKE_CASE = chunk_length
SCREAMING_SNAKE_CASE = sampling_rate
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> Optional[int]:
'''simple docstring'''
return {
"spectrogram_length": self.spectrogram_length,
"feature_size": self.feature_size,
"num_audio_channels": self.num_audio_channels,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"sampling_rate": self.sampling_rate,
}
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ,lowerCamelCase__ : Tuple=False ,lowerCamelCase__ : Union[str, Any]=False ) -> str:
'''simple docstring'''
def _flatten(lowerCamelCase__ : List[Any] ):
return list(itertools.chain(*lowerCamelCase__ ) )
if equal_length:
SCREAMING_SNAKE_CASE = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
SCREAMING_SNAKE_CASE = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length ,self.max_seq_length ,self.seq_length_diff )
]
if numpify:
SCREAMING_SNAKE_CASE = [np.asarray(lowerCamelCase__ ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class UpperCamelCase__ ( lowerCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
__snake_case : List[Any] = TvltFeatureExtractor
def SCREAMING_SNAKE_CASE__ ( self : str ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE = TvltFeatureExtractionTester(self )
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.feature_extraction_class(**self.feat_extract_dict )
self.assertTrue(hasattr(lowerCamelCase__ ,"""spectrogram_length""" ) )
self.assertTrue(hasattr(lowerCamelCase__ ,"""feature_size""" ) )
self.assertTrue(hasattr(lowerCamelCase__ ,"""num_audio_channels""" ) )
self.assertTrue(hasattr(lowerCamelCase__ ,"""hop_length""" ) )
self.assertTrue(hasattr(lowerCamelCase__ ,"""chunk_length""" ) )
self.assertTrue(hasattr(lowerCamelCase__ ,"""sampling_rate""" ) )
def SCREAMING_SNAKE_CASE__ ( self : str ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE = feat_extract_first.save_pretrained(lowerCamelCase__ )[0]
check_json_file_has_correct_format(lowerCamelCase__ )
SCREAMING_SNAKE_CASE = self.feature_extraction_class.from_pretrained(lowerCamelCase__ )
SCREAMING_SNAKE_CASE = feat_extract_first.to_dict()
SCREAMING_SNAKE_CASE = feat_extract_second.to_dict()
SCREAMING_SNAKE_CASE = dict_first.pop("""mel_filters""" )
SCREAMING_SNAKE_CASE = dict_second.pop("""mel_filters""" )
self.assertTrue(np.allclose(lowerCamelCase__ ,lowerCamelCase__ ) )
self.assertEqual(lowerCamelCase__ ,lowerCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE = os.path.join(lowerCamelCase__ ,"""feat_extract.json""" )
feat_extract_first.to_json_file(lowerCamelCase__ )
SCREAMING_SNAKE_CASE = self.feature_extraction_class.from_json_file(lowerCamelCase__ )
SCREAMING_SNAKE_CASE = feat_extract_first.to_dict()
SCREAMING_SNAKE_CASE = feat_extract_second.to_dict()
SCREAMING_SNAKE_CASE = dict_first.pop("""mel_filters""" )
SCREAMING_SNAKE_CASE = dict_second.pop("""mel_filters""" )
self.assertTrue(np.allclose(lowerCamelCase__ ,lowerCamelCase__ ) )
self.assertEqual(lowerCamelCase__ ,lowerCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( self : str ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.feature_extraction_class(**self.feat_extract_dict )
# create three inputs of length 800, 1000, and 1200
SCREAMING_SNAKE_CASE = [floats_list((1, x) )[0] for x in range(800 ,1400 ,200 )]
SCREAMING_SNAKE_CASE = [np.asarray(lowerCamelCase__ ) for speech_input in speech_inputs]
# Test not batched input
SCREAMING_SNAKE_CASE = feature_extractor(np_speech_inputs[0] ,return_tensors="""np""" ,sampling_rate=44100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test batched
SCREAMING_SNAKE_CASE = feature_extractor(lowerCamelCase__ ,return_tensors="""np""" ,sampling_rate=44100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test audio masking
SCREAMING_SNAKE_CASE = feature_extractor(
lowerCamelCase__ ,return_tensors="""np""" ,sampling_rate=44100 ,mask_audio=lowerCamelCase__ ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test 2-D numpy arrays are batched.
SCREAMING_SNAKE_CASE = [floats_list((1, x) )[0] for x in (800, 800, 800)]
SCREAMING_SNAKE_CASE = np.asarray(lowerCamelCase__ )
SCREAMING_SNAKE_CASE = feature_extractor(lowerCamelCase__ ,return_tensors="""np""" ,sampling_rate=44100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
    def _load_datasamples(self, num_samples):
        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]
        return [x["array"] for x in speech_samples]
    def test_integration(self):
        input_speech = self._load_datasamples(1)
        feature_extractor = TvltFeatureExtractor()
        audio_values = feature_extractor(input_speech, return_tensors="pt").audio_values

        self.assertEqual(audio_values.shape, (1, 1, 192, 128))

        expected_slice = torch.tensor([[-0.3032, -0.2708], [-0.4434, -0.4007]])
        self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2], expected_slice, atol=1e-4))
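# Quick sanity sketch for the `floats_list` helper above (illustrative; runs
# without any model or dataset download):
if __name__ == "__main__":
    sample = floats_list((2, 4))
    assert len(sample) == 2 and len(sample[0]) == 4  # nested (batch, length) lists of floats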
| 296
| 0
|
import argparse
import gc
import json
import os
import re
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig
from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint
NUM_HIDDEN_LAYERS_MAPPING = {
"169M": 12,
"430M": 24,
"1B5": 24,
"3B": 32,
"7B": 32,
"14B": 40,
}
HIDDEN_SIZE_MAPPING = {
"169M": 768,
"430M": 1_024,
"1B5": 2_048,
"3B": 2_560,
"7B": 4_096,
"14B": 5_120,
}
def convert_state_dict(state_dict):
    """Renames raw RWKV checkpoint keys to match the Hugging Face implementation."""
    state_dict_keys = list(state_dict.keys())
    for name in state_dict_keys:
        weight = state_dict.pop(name)
        # emb -> embedding
        if name.startswith("emb."):
            name = name.replace("emb.", "embeddings.")
        # ln_0 -> pre_ln (only present at block 0)
        if name.startswith("blocks.0.ln0"):
            name = name.replace("blocks.0.ln0", "blocks.0.pre_ln")
        # att -> attention
        name = re.sub(r"blocks\.(\d+)\.att", r"blocks.\1.attention", name)
        # ffn -> feed_forward
        name = re.sub(r"blocks\.(\d+)\.ffn", r"blocks.\1.feed_forward", name)
        # time_mix_k -> time_mix_key
        if name.endswith(".time_mix_k"):
            name = name.replace(".time_mix_k", ".time_mix_key")
        # time_mix_v -> time_mix_value
        if name.endswith(".time_mix_v"):
            name = name.replace(".time_mix_v", ".time_mix_value")
        # time_mix_r -> time_mix_receptance
        if name.endswith(".time_mix_r"):
            name = name.replace(".time_mix_r", ".time_mix_receptance")

        if name != "head.weight":
            name = "rwkv." + name

        state_dict[name] = weight
    return state_dict
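# Example mappings performed by `convert_state_dict` (illustrative key names):
#   "emb.weight"                -> "rwkv.embeddings.weight"
#   "blocks.0.att.time_mix_k"   -> "rwkv.blocks.0.attention.time_mix_key"
#   "blocks.2.ffn.key.weight"   -> "rwkv.blocks.2.feed_forward.key.weight"
#   "head.weight"               -> "head.weight"  (unchanged)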
def convert_rmkv_checkpoint_to_hf_format(
    repo_id, checkpoint_file, output_dir, size=None, tokenizer_file=None, push_to_hub=False, model_name=None
):
    # 1. If possible, build the tokenizer.
    if tokenizer_file is None:
        print("No `--tokenizer_file` provided, we will use the default tokenizer.")
        vocab_size = 50277
        tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neox-20b")
    else:
        tokenizer = PreTrainedTokenizerFast(tokenizer_file=tokenizer_file)
        vocab_size = len(tokenizer)
    tokenizer.save_pretrained(output_dir)

    # 2. Build the config
    possible_sizes = list(NUM_HIDDEN_LAYERS_MAPPING.keys())
    if size is None:
        # Try to infer size from the checkpoint name
        for candidate in possible_sizes:
            if candidate in checkpoint_file:
                size = candidate
                break
        if size is None:
            raise ValueError("Could not infer the size, please provide it with the `--size` argument.")
    if size not in possible_sizes:
        raise ValueError(f"`size` should be one of {possible_sizes}, got {size}.")

    config = RwkvConfig(
        vocab_size=vocab_size,
        num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size],
        hidden_size=HIDDEN_SIZE_MAPPING[size],
    )
    config.save_pretrained(output_dir)

    # 3. Download model file then convert state_dict
    model_file = hf_hub_download(repo_id, checkpoint_file)
    state_dict = torch.load(model_file, map_location="cpu")
    state_dict = convert_state_dict(state_dict)

    # 4. Split in shards and save
    shards, index = shard_checkpoint(state_dict)
    for shard_file, shard in shards.items():
        torch.save(shard, os.path.join(output_dir, shard_file))

    if index is not None:
        save_index_file = os.path.join(output_dir, WEIGHTS_INDEX_NAME)
        # Save the index as well
        with open(save_index_file, "w", encoding="utf-8") as f:
            content = json.dumps(index, indent=2, sort_keys=True) + "\n"
            f.write(content)

    # 5. Clean up shards (for some reason the files PyTorch saves take the same space as the whole state_dict).
    print(
        "Cleaning up shards. This may error with an OOM error, if this is the case don't worry you still have converted the model."
    )
    shard_files = list(shards.keys())

    del state_dict
    del shards
    gc.collect()

    for shard_file in shard_files:
        state_dict = torch.load(os.path.join(output_dir, shard_file))
        torch.save({k: v.cpu().clone() for k, v in state_dict.items()}, os.path.join(output_dir, shard_file))

    del state_dict
    gc.collect()

    if push_to_hub:
        if model_name is None:
            raise ValueError("Please provide a `model_name` to push the model to the Hub.")
        model = AutoModelForCausalLM.from_pretrained(output_dir)
        model.push_to_hub(model_name, max_shard_size="2GB")
        tokenizer.push_to_hub(model_name)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--repo_id", default=None, type=str, required=True, help="Repo ID from which to pull the checkpoint."
)
parser.add_argument(
"--checkpoint_file", default=None, type=str, required=True, help="Name of the checkpoint file in the repo."
)
parser.add_argument(
"--output_dir", default=None, type=str, required=True, help="Where to save the converted model."
)
parser.add_argument(
"--tokenizer_file",
default=None,
type=str,
help="Path to the tokenizer file to use (if not provided, only the model is converted).",
)
parser.add_argument(
"--size",
default=None,
type=str,
help="Size of the model. Will be inferred from the `checkpoint_file` if not passed.",
)
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Push to the Hub the converted model.",
)
parser.add_argument(
"--model_name",
default=None,
type=str,
help="Name of the pushed model on the Hub, including the username / organization.",
)
args = parser.parse_args()
convert_rmkv_checkpoint_to_hf_format(
args.repo_id,
args.checkpoint_file,
args.output_dir,
size=args.size,
tokenizer_file=args.tokenizer_file,
push_to_hub=args.push_to_hub,
model_name=args.model_name,
)
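# Example invocation (illustrative repo and file names; requires Hub access):
#   python convert_rwkv_checkpoint_to_hf.py --repo_id BlinkDL/rwkv-4-pile-169m \
#       --checkpoint_file RWKV-4-Pile-169M.pth --output_dir ./rwkv-169m-hf --size 169M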
| 138
|
from decimal import Decimal, getcontext
from math import ceil, factorial
def pi(precision: int) -> str:
    """The Chudnovsky algorithm is a fast method for calculating the digits of pi."""
    if not isinstance(precision, int):
        raise TypeError("Undefined for non-integers")
    elif precision < 1:
        raise ValueError("Undefined for non-natural numbers")

    getcontext().prec = precision
    num_iterations = ceil(precision / 14)
    constant_term = 426880 * Decimal(10005).sqrt()
    exponential_term = 1
    linear_term = 13591409
    partial_sum = Decimal(linear_term)
    for k in range(1, num_iterations):
        multinomial_term = factorial(6 * k) // (factorial(3 * k) * factorial(k) ** 3)
        linear_term += 545140134
        exponential_term *= -262537412640768000
        partial_sum += Decimal(multinomial_term * linear_term) / exponential_term
    return str(constant_term / partial_sum)[:-1]


if __name__ == "__main__":
    n = 50
    print(f"The first {n} digits of pi is: {pi(n)}")
| 138
| 1
|
def kinetic_energy(mass: float, velocity: float) -> float:
    """
    Calculate the kinetic energy of a body: KE = 1/2 * m * v^2.

    >>> kinetic_energy(10, 10)
    500.0
    """
    if mass < 0:
        raise ValueError("The mass of a body cannot be negative")
    return 0.5 * mass * abs(velocity) * abs(velocity)


if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)
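# Direction does not matter: abs(velocity) makes the result depend on speed only,
# e.g. kinetic_energy(5, -10) == kinetic_energy(5, 10) == 250.0.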
| 99
|
def euclidean_gcd(a: int, b: int) -> int:
    """Iterative Euclidean algorithm."""
    while b:
        a, b = b, a % b
    return a


def euclidean_gcd_recursive(a: int, b: int) -> int:
    """Recursive Euclidean algorithm."""
    return a if b == 0 else euclidean_gcd_recursive(b, a % b)


def main() -> None:
    print(f"euclidean_gcd(3, 5) = {euclidean_gcd(3, 5)}")
    print(f"euclidean_gcd(5, 3) = {euclidean_gcd(5, 3)}")
    print(f"euclidean_gcd(1, 3) = {euclidean_gcd(1, 3)}")
    print(f"euclidean_gcd(3, 6) = {euclidean_gcd(3, 6)}")
    print(f"euclidean_gcd(6, 3) = {euclidean_gcd(6, 3)}")
    print(f"euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3, 5)}")
    print(f"euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5, 3)}")
    print(f"euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1, 3)}")
    print(f"euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3, 6)}")
    print(f"euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6, 3)}")


if __name__ == "__main__":
    main()
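# Both variants implement gcd(a, b) = gcd(b, a mod b). Trace for gcd(12, 18):
# (12, 18) -> (18, 12) -> (12, 6) -> (6, 0) -> result 6.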
| 189
| 0
|
"""simple docstring"""
from .imports import is_tqdm_available
if is_tqdm_available():
from tqdm.auto import tqdm as _tqdm
from ..state import PartialState
def tqdm(main_process_only: bool = True, *args, **kwargs):
    """`tqdm` wrapper that can be disabled on all but the main process."""
    if not is_tqdm_available():
        raise ImportError("Accelerate's `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`.")
    disable = False
    if main_process_only:
        disable = PartialState().local_process_index != 0
    return _tqdm(*args, **kwargs, disable=disable)
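# Illustrative usage (a sketch; assumes an initialized Accelerate process state,
# and note that `main_process_only` is the first positional argument):
#   for _ in tqdm(True, range(100)):
#       ...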
| 68
|
"""simple docstring"""
import os
import tempfile
import unittest
from transformers import DistilBertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
)
class _UpperCAmelCase ( a ):
'''simple docstring'''
def __init__( self , A , A=1_3 , A=7 , A=True , A=True , A=False , A=True , A=9_9 , A=3_2 , A=5 , A=4 , A=3_7 , A="gelu" , A=0.1 , A=0.1 , A=5_1_2 , A=1_6 , A=2 , A=0.02 , A=3 , A=4 , A=None , ) -> Dict:
_UpperCAmelCase : Dict = parent
_UpperCAmelCase : int = batch_size
_UpperCAmelCase : Union[str, Any] = seq_length
_UpperCAmelCase : str = is_training
_UpperCAmelCase : Optional[Any] = use_input_mask
_UpperCAmelCase : List[Any] = use_token_type_ids
_UpperCAmelCase : str = use_labels
_UpperCAmelCase : List[str] = vocab_size
_UpperCAmelCase : str = hidden_size
_UpperCAmelCase : List[str] = num_hidden_layers
_UpperCAmelCase : List[str] = num_attention_heads
_UpperCAmelCase : Union[str, Any] = intermediate_size
_UpperCAmelCase : str = hidden_act
_UpperCAmelCase : Optional[int] = hidden_dropout_prob
_UpperCAmelCase : Union[str, Any] = attention_probs_dropout_prob
_UpperCAmelCase : str = max_position_embeddings
_UpperCAmelCase : Optional[int] = type_vocab_size
_UpperCAmelCase : List[str] = type_sequence_label_size
_UpperCAmelCase : Optional[Any] = initializer_range
_UpperCAmelCase : Optional[int] = num_labels
_UpperCAmelCase : int = num_choices
_UpperCAmelCase : Dict = scope
def __lowerCAmelCase ( self ) -> int:
_UpperCAmelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCAmelCase : int = None
if self.use_input_mask:
_UpperCAmelCase : List[str] = random_attention_mask([self.batch_size, self.seq_length] )
_UpperCAmelCase : int = None
_UpperCAmelCase : Optional[int] = None
_UpperCAmelCase : Optional[Any] = None
if self.use_labels:
_UpperCAmelCase : int = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCAmelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_UpperCAmelCase : Union[str, Any] = ids_tensor([self.batch_size] , self.num_choices )
_UpperCAmelCase : List[str] = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def __lowerCAmelCase ( self ) -> Union[str, Any]:
return DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
def __lowerCAmelCase ( self , A , A , A , A , A , A ) -> Tuple:
_UpperCAmelCase : int = DistilBertModel(config=A )
model.to(A )
model.eval()
_UpperCAmelCase : Optional[int] = model(A , A )
_UpperCAmelCase : List[str] = model(A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __lowerCAmelCase ( self , A , A , A , A , A , A ) -> Any:
_UpperCAmelCase : int = DistilBertForMaskedLM(config=A )
model.to(A )
model.eval()
_UpperCAmelCase : Any = model(A , attention_mask=A , labels=A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __lowerCAmelCase ( self , A , A , A , A , A , A ) -> List[Any]:
_UpperCAmelCase : Tuple = DistilBertForQuestionAnswering(config=A )
model.to(A )
model.eval()
_UpperCAmelCase : Optional[int] = model(
A , attention_mask=A , start_positions=A , end_positions=A )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __lowerCAmelCase ( self , A , A , A , A , A , A ) -> str:
_UpperCAmelCase : List[Any] = self.num_labels
_UpperCAmelCase : Union[str, Any] = DistilBertForSequenceClassification(A )
model.to(A )
model.eval()
_UpperCAmelCase : int = model(A , attention_mask=A , labels=A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __lowerCAmelCase ( self , A , A , A , A , A , A ) -> List[Any]:
_UpperCAmelCase : Any = self.num_labels
_UpperCAmelCase : Optional[int] = DistilBertForTokenClassification(config=A )
model.to(A )
model.eval()
_UpperCAmelCase : Optional[Any] = model(A , attention_mask=A , labels=A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __lowerCAmelCase ( self , A , A , A , A , A , A ) -> List[Any]:
_UpperCAmelCase : str = self.num_choices
_UpperCAmelCase : Dict = DistilBertForMultipleChoice(config=A )
model.to(A )
model.eval()
_UpperCAmelCase : Tuple = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_UpperCAmelCase : Optional[int] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_UpperCAmelCase : Optional[Any] = model(
A , attention_mask=A , labels=A , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __lowerCAmelCase ( self ) -> Optional[Any]:
_UpperCAmelCase : Dict = self.prepare_config_and_inputs()
((_UpperCAmelCase) , (_UpperCAmelCase) , (_UpperCAmelCase) , (_UpperCAmelCase) , (_UpperCAmelCase) , (_UpperCAmelCase)) : int = config_and_inputs
_UpperCAmelCase : Union[str, Any] = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class _UpperCAmelCase ( a ,a ,unittest.TestCase ):
'''simple docstring'''
a__ =(
(
DistilBertModel,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
)
if is_torch_available()
else None
)
a__ =(
{
'''feature-extraction''': DistilBertModel,
'''fill-mask''': DistilBertForMaskedLM,
'''question-answering''': DistilBertForQuestionAnswering,
'''text-classification''': DistilBertForSequenceClassification,
'''token-classification''': DistilBertForTokenClassification,
'''zero-shot''': DistilBertForSequenceClassification,
}
if is_torch_available()
else {}
)
a__ =True
a__ =True
a__ =True
a__ =True
def __lowerCAmelCase ( self ) -> Optional[Any]:
_UpperCAmelCase : Union[str, Any] = DistilBertModelTester(self )
_UpperCAmelCase : List[Any] = ConfigTester(self , config_class=A , dim=3_7 )
def __lowerCAmelCase ( self ) -> Tuple:
self.config_tester.run_common_tests()
def __lowerCAmelCase ( self ) -> Optional[Any]:
_UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_model(*A )
def __lowerCAmelCase ( self ) -> Tuple:
_UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_masked_lm(*A )
def __lowerCAmelCase ( self ) -> Dict:
_UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_question_answering(*A )
def __lowerCAmelCase ( self ) -> Any:
_UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_sequence_classification(*A )
def __lowerCAmelCase ( self ) -> Tuple:
_UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_token_classification(*A )
def __lowerCAmelCase ( self ) -> Optional[int]:
_UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_multiple_choice(*A )
@slow
def __lowerCAmelCase ( self ) -> Optional[Any]:
for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCAmelCase : Optional[int] = DistilBertModel.from_pretrained(A )
self.assertIsNotNone(A )
@slow
@require_torch_gpu
def __lowerCAmelCase ( self ) -> Optional[Any]:
_UpperCAmelCase , _UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# BertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == DistilBertForMultipleChoice:
return
_UpperCAmelCase : Tuple = True
_UpperCAmelCase : Union[str, Any] = model_class(config=A )
_UpperCAmelCase : List[Any] = self._prepare_for_class(A , A )
_UpperCAmelCase : int = torch.jit.trace(
A , (inputs_dict['''input_ids'''].to('''cpu''' ), inputs_dict['''attention_mask'''].to('''cpu''' )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(A , os.path.join(A , '''traced_model.pt''' ) )
_UpperCAmelCase : Optional[int] = torch.jit.load(os.path.join(A , '''traced_model.pt''' ) , map_location=A )
loaded(inputs_dict['''input_ids'''].to(A ) , inputs_dict['''attention_mask'''].to(A ) )
@require_torch
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def __lowerCAmelCase ( self ) -> List[str]:
_UpperCAmelCase : Dict = DistilBertModel.from_pretrained('''distilbert-base-uncased''' )
_UpperCAmelCase : List[str] = torch.tensor([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]] )
_UpperCAmelCase : Optional[Any] = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
_UpperCAmelCase : str = model(A , attention_mask=A )[0]
_UpperCAmelCase : Dict = torch.Size((1, 1_1, 7_6_8) )
self.assertEqual(output.shape , A )
_UpperCAmelCase : Any = torch.tensor(
[[[-0.1_639, 0.3_299, 0.1_648], [-0.1_746, 0.3_289, 0.1_710], [-0.1_884, 0.3_357, 0.1_810]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , A , atol=1E-4 ) )
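# Minimal inference sketch mirroring the integration test above (illustrative;
# downloads the public "distilbert-base-uncased" checkpoint on first use):
#   model = DistilBertModel.from_pretrained("distilbert-base-uncased")
#   with torch.no_grad():
#       hidden = model(input_ids, attention_mask=attention_mask)[0]  # (batch, seq, 768)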
| 68
| 1
|
def decimal_isolate(number: float, digit_amount: int) -> float:
    """Isolates the decimal part of `number`, rounded to `digit_amount` digits when positive."""
    if digit_amount > 0:
        return round(number - int(number), digit_amount)
    return number - int(number)


if __name__ == "__main__":
    print(decimal_isolate(1.53, 0))
    print(decimal_isolate(35.345, 1))
    print(decimal_isolate(35.345, 2))
    print(decimal_isolate(35.345, 3))
    print(decimal_isolate(-14.789, 3))
    print(decimal_isolate(0, 2))
    print(decimal_isolate(-14.123, 1))
    print(decimal_isolate(-14.123, 2))
    print(decimal_isolate(-14.123, 3))
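# Expected values, up to floating-point representation (illustrative):
# ~0.53, 0.3, 0.34, 0.345, -0.789, 0, -0.1, -0.12, -0.123. Note the
# digit_amount <= 0 branch returns the raw, unrounded fractional part.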
| 26
|
"""simple docstring"""
import argparse
import torch
from transformers import OpenAIGPTConfig, OpenAIGPTModel, load_tf_weights_in_openai_gpt
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_openai_checkpoint_to_pytorch(openai_checkpoint_folder_path, openai_config_file, pytorch_dump_folder_path):
    """Converts an original OpenAI GPT TensorFlow checkpoint to a PyTorch model."""
    # Construct the model config
    if openai_config_file == "":
        config = OpenAIGPTConfig()
    else:
        config = OpenAIGPTConfig.from_json_file(openai_config_file)
    model = OpenAIGPTModel(config)

    # Load weights from numpy
    load_tf_weights_in_openai_gpt(model, config, openai_checkpoint_folder_path)

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())
if __name__ == "__main__":
SCREAMING_SNAKE_CASE : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--openai_checkpoint_folder_path""",
default=None,
type=str,
required=True,
help="""Path to the TensorFlow checkpoint path.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--openai_config_file""",
default="""""",
type=str,
help=(
"""An optional config json file corresponding to the pre-trained OpenAI model. \n"""
"""This specifies the model architecture."""
),
)
SCREAMING_SNAKE_CASE : Optional[int] = parser.parse_args()
convert_openai_checkpoint_to_pytorch(
args.openai_checkpoint_folder_path, args.openai_config_file, args.pytorch_dump_folder_path
)
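# Example invocation (illustrative paths; the script name is hypothetical):
#   python convert_openai_checkpoint_to_pytorch.py \
#       --openai_checkpoint_folder_path ./openai-gpt-tf-ckpt \
#       --pytorch_dump_folder_path ./openai-gpt-pytorch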
| 102
| 0
|
"""simple docstring"""
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import sys
import transformers
os.environ["TF_CPP_MIN_LOG_LEVEL"] = '3'  # reduce TensorFlow log noise
print('Python version:', sys.version)
print('transformers version:', transformers.__version__)
try:
import torch
print('Torch version:', torch.__version__)
print('Cuda available:', torch.cuda.is_available())
print('Cuda version:', torch.version.cuda)
print('CuDNN version:', torch.backends.cudnn.version())
print('Number of GPUs available:', torch.cuda.device_count())
print('NCCL version:', torch.cuda.nccl.version())
except ImportError:
print('Torch version:', None)
try:
import deepspeed
print('DeepSpeed version:', deepspeed.__version__)
except ImportError:
print('DeepSpeed version:', None)
try:
import tensorflow as tf
print('TensorFlow version:', tf.__version__)
print('TF GPUs available:', bool(tf.config.list_physical_devices('GPU')))
print('Number of TF GPUs available:', len(tf.config.list_physical_devices('GPU')))
except ImportError:
print('TensorFlow version:', None)
| 23
|
"""simple docstring"""
def sylvester(number: int) -> int:
    """Calculates the number at position `number` in Sylvester's sequence."""
    assert isinstance(number, int), f"""The input value of [n={number}] is not an integer"""

    if number == 1:
        return 2
    elif number < 1:
        msg = f"""The input value of [n={number}] has to be > 0"""
        raise ValueError(msg)
    else:
        num = sylvester(number - 1)
        lower = num - 1
        upper = num
        return lower * upper + 1


if __name__ == "__main__":
    print(f"""The 8th number in Sylvester's sequence: {sylvester(8)}""")
| 23
| 1
|
'''simple docstring'''
import random
import unittest
from torch.utils.data import BatchSampler, DataLoader, IterableDataset
from accelerate import Accelerator
from accelerate.data_loader import (
BatchSamplerShard,
DataLoaderDispatcher,
DataLoaderShard,
IterableDatasetShard,
SkipBatchSampler,
SkipDataLoader,
skip_first_batches,
)
class RandomIterableDataset(IterableDataset):
    # Yields a random-length stream of counters; only for DataLoader sharding tests.
    def __init__(self, p_stop=0.01, max_length=1000):
        self.p_stop = p_stop
        self.max_length = max_length

    def __iter__(self):
        count = 0
        stop = False
        while not stop and count < self.max_length:
            yield count
            count += 1
            stop = random.random() < self.p_stop
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def snake_case__ ( self : Any , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : List[Any]=False , lowerCAmelCase__ : Any=True ) -> Dict:
'''simple docstring'''
_UpperCamelCase = [
BatchSamplerShard(lowerCAmelCase__ , 2 , lowerCAmelCase__ , split_batches=lowerCAmelCase__ , even_batches=lowerCAmelCase__ )
for i in range(2 )
]
_UpperCamelCase = [list(lowerCAmelCase__ ) for batch_sampler_shard in batch_sampler_shards]
if not split_batches:
self.assertListEqual([len(lowerCAmelCase__ ) for shard in batch_sampler_shards] , [len(lowerCAmelCase__ ) for e in expected] )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
def snake_case__ ( self : Optional[int] ) -> str:
'''simple docstring'''
_UpperCamelCase = BatchSampler(range(24 ) , batch_size=3 , drop_last=lowerCAmelCase__ )
_UpperCamelCase = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
]
self.check_batch_sampler_shards(lowerCAmelCase__ , lowerCAmelCase__ )
_UpperCamelCase = BatchSampler(range(24 ) , batch_size=3 , drop_last=lowerCAmelCase__ )
# Expected shouldn't change
self.check_batch_sampler_shards(lowerCAmelCase__ , lowerCAmelCase__ )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
_UpperCamelCase = BatchSampler(range(21 ) , batch_size=3 , drop_last=lowerCAmelCase__ )
_UpperCamelCase = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]],
]
self.check_batch_sampler_shards(lowerCAmelCase__ , lowerCAmelCase__ )
_UpperCamelCase = BatchSampler(range(21 ) , batch_size=3 , drop_last=lowerCAmelCase__ )
_UpperCamelCase = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(lowerCAmelCase__ , lowerCAmelCase__ )
# Check the shards when the dataset is not a round multiple of batch size but has a multiple of
# num_processes batch.
_UpperCamelCase = BatchSampler(range(22 ) , batch_size=3 , drop_last=lowerCAmelCase__ )
_UpperCamelCase = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 0, 1]],
]
self.check_batch_sampler_shards(lowerCAmelCase__ , lowerCAmelCase__ )
_UpperCamelCase = BatchSampler(range(22 ) , batch_size=3 , drop_last=lowerCAmelCase__ )
_UpperCamelCase = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(lowerCAmelCase__ , lowerCAmelCase__ )
# Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of
# num_processes batch.
_UpperCamelCase = BatchSampler(range(20 ) , batch_size=3 , drop_last=lowerCAmelCase__ )
_UpperCamelCase = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 0]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [1, 2, 3]],
]
self.check_batch_sampler_shards(lowerCAmelCase__ , lowerCAmelCase__ )
_UpperCamelCase = BatchSampler(range(20 ) , batch_size=3 , drop_last=lowerCAmelCase__ )
_UpperCamelCase = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(lowerCAmelCase__ , lowerCAmelCase__ )
# Check the shards when the dataset is very small.
_UpperCamelCase = BatchSampler(range(2 ) , batch_size=3 , drop_last=lowerCAmelCase__ )
_UpperCamelCase = [[[0, 1, 0]], [[1, 0, 1]]]
self.check_batch_sampler_shards(lowerCAmelCase__ , lowerCAmelCase__ )
_UpperCamelCase = BatchSampler(range(2 ) , batch_size=3 , drop_last=lowerCAmelCase__ )
_UpperCamelCase = [[], []]
self.check_batch_sampler_shards(lowerCAmelCase__ , lowerCAmelCase__ )
def snake_case__ ( self : Dict ) -> List[Any]:
'''simple docstring'''
_UpperCamelCase = BatchSampler(range(24 ) , batch_size=4 , drop_last=lowerCAmelCase__ )
_UpperCamelCase = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(lowerCAmelCase__ , lowerCAmelCase__ , split_batches=lowerCAmelCase__ )
_UpperCamelCase = BatchSampler(range(24 ) , batch_size=4 , drop_last=lowerCAmelCase__ )
# Expected shouldn't change
self.check_batch_sampler_shards(lowerCAmelCase__ , lowerCAmelCase__ , split_batches=lowerCAmelCase__ )
# Check the shards when the dataset is not a round multiple of batch size.
_UpperCamelCase = BatchSampler(range(22 ) , batch_size=4 , drop_last=lowerCAmelCase__ )
_UpperCamelCase = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [0, 1]],
]
self.check_batch_sampler_shards(lowerCAmelCase__ , lowerCAmelCase__ , split_batches=lowerCAmelCase__ )
_UpperCamelCase = BatchSampler(range(22 ) , batch_size=4 , drop_last=lowerCAmelCase__ )
_UpperCamelCase = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(lowerCAmelCase__ , lowerCAmelCase__ , split_batches=lowerCAmelCase__ )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
_UpperCamelCase = BatchSampler(range(21 ) , batch_size=4 , drop_last=lowerCAmelCase__ )
_UpperCamelCase = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 0]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [1, 2]],
]
self.check_batch_sampler_shards(lowerCAmelCase__ , lowerCAmelCase__ , split_batches=lowerCAmelCase__ )
_UpperCamelCase = BatchSampler(range(21 ) , batch_size=4 , drop_last=lowerCAmelCase__ )
_UpperCamelCase = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(lowerCAmelCase__ , lowerCAmelCase__ , split_batches=lowerCAmelCase__ )
# Check the shards when the dataset is very small.
_UpperCamelCase = BatchSampler(range(2 ) , batch_size=4 , drop_last=lowerCAmelCase__ )
_UpperCamelCase = [[[0, 1]], [[0, 1]]]
self.check_batch_sampler_shards(lowerCAmelCase__ , lowerCAmelCase__ , split_batches=lowerCAmelCase__ )
_UpperCamelCase = BatchSampler(range(2 ) , batch_size=4 , drop_last=lowerCAmelCase__ )
_UpperCamelCase = [[], []]
self.check_batch_sampler_shards(lowerCAmelCase__ , lowerCAmelCase__ , split_batches=lowerCAmelCase__ )
def snake_case__ ( self : str ) -> Union[str, Any]:
'''simple docstring'''
_UpperCamelCase = BatchSampler(range(24 ) , batch_size=3 , drop_last=lowerCAmelCase__ )
_UpperCamelCase = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
]
self.check_batch_sampler_shards(lowerCAmelCase__ , lowerCAmelCase__ , even_batches=lowerCAmelCase__ )
_UpperCamelCase = BatchSampler(range(24 ) , batch_size=3 , drop_last=lowerCAmelCase__ )
# Expected shouldn't change
self.check_batch_sampler_shards(lowerCAmelCase__ , lowerCAmelCase__ , even_batches=lowerCAmelCase__ )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
_UpperCamelCase = BatchSampler(range(21 ) , batch_size=3 , drop_last=lowerCAmelCase__ )
_UpperCamelCase = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(lowerCAmelCase__ , lowerCAmelCase__ , even_batches=lowerCAmelCase__ )
_UpperCamelCase = BatchSampler(range(21 ) , batch_size=3 , drop_last=lowerCAmelCase__ )
_UpperCamelCase = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(lowerCAmelCase__ , lowerCAmelCase__ , even_batches=lowerCAmelCase__ )
# Check the shards when the dataset is not a round multiple of batch size but has a multiple of
# num_processes batch.
_UpperCamelCase = BatchSampler(range(22 ) , batch_size=3 , drop_last=lowerCAmelCase__ )
_UpperCamelCase = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21]],
]
self.check_batch_sampler_shards(lowerCAmelCase__ , lowerCAmelCase__ , even_batches=lowerCAmelCase__ )
_UpperCamelCase = BatchSampler(range(22 ) , batch_size=3 , drop_last=lowerCAmelCase__ )
_UpperCamelCase = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(lowerCAmelCase__ , lowerCAmelCase__ , even_batches=lowerCAmelCase__ )
# Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of
# num_processes batch.
_UpperCamelCase = BatchSampler(range(20 ) , batch_size=3 , drop_last=lowerCAmelCase__ )
_UpperCamelCase = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(lowerCAmelCase__ , lowerCAmelCase__ , even_batches=lowerCAmelCase__ )
_UpperCamelCase = BatchSampler(range(20 ) , batch_size=3 , drop_last=lowerCAmelCase__ )
_UpperCamelCase = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(lowerCAmelCase__ , lowerCAmelCase__ , even_batches=lowerCAmelCase__ )
# Check the shards when the dataset is very small.
_UpperCamelCase = BatchSampler(range(2 ) , batch_size=3 , drop_last=lowerCAmelCase__ )
_UpperCamelCase = [[[0, 1]], []]
self.check_batch_sampler_shards(lowerCAmelCase__ , lowerCAmelCase__ , even_batches=lowerCAmelCase__ )
_UpperCamelCase = BatchSampler(range(2 ) , batch_size=3 , drop_last=lowerCAmelCase__ )
_UpperCamelCase = [[], []]
self.check_batch_sampler_shards(lowerCAmelCase__ , lowerCAmelCase__ , even_batches=lowerCAmelCase__ )
def snake_case__ ( self : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
_UpperCamelCase = BatchSampler(range(24 ) , batch_size=4 , drop_last=lowerCAmelCase__ )
_UpperCamelCase = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(lowerCAmelCase__ , lowerCAmelCase__ , split_batches=lowerCAmelCase__ , even_batches=lowerCAmelCase__ )
_UpperCamelCase = BatchSampler(range(24 ) , batch_size=4 , drop_last=lowerCAmelCase__ )
# Expected shouldn't change
self.check_batch_sampler_shards(lowerCAmelCase__ , lowerCAmelCase__ , split_batches=lowerCAmelCase__ , even_batches=lowerCAmelCase__ )
# Check the shards when the dataset is not a round multiple of batch size.
_UpperCamelCase = BatchSampler(range(22 ) , batch_size=4 , drop_last=lowerCAmelCase__ )
_UpperCamelCase = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(lowerCAmelCase__ , lowerCAmelCase__ , split_batches=lowerCAmelCase__ , even_batches=lowerCAmelCase__ )
_UpperCamelCase = BatchSampler(range(22 ) , batch_size=4 , drop_last=lowerCAmelCase__ )
_UpperCamelCase = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(lowerCAmelCase__ , lowerCAmelCase__ , split_batches=lowerCAmelCase__ , even_batches=lowerCAmelCase__ )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
_UpperCamelCase = BatchSampler(range(21 ) , batch_size=4 , drop_last=lowerCAmelCase__ )
_UpperCamelCase = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(lowerCAmelCase__ , lowerCAmelCase__ , split_batches=lowerCAmelCase__ , even_batches=lowerCAmelCase__ )
_UpperCamelCase = BatchSampler(range(21 ) , batch_size=4 , drop_last=lowerCAmelCase__ )
_UpperCamelCase = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(lowerCAmelCase__ , lowerCAmelCase__ , split_batches=lowerCAmelCase__ , even_batches=lowerCAmelCase__ )
# Check the shards when the dataset is very small.
_UpperCamelCase = BatchSampler(range(2 ) , batch_size=4 , drop_last=lowerCAmelCase__ )
_UpperCamelCase = [[[0, 1]], []]
self.check_batch_sampler_shards(lowerCAmelCase__ , lowerCAmelCase__ , split_batches=lowerCAmelCase__ , even_batches=lowerCAmelCase__ )
_UpperCamelCase = BatchSampler(range(2 ) , batch_size=4 , drop_last=lowerCAmelCase__ )
_UpperCamelCase = [[], []]
self.check_batch_sampler_shards(lowerCAmelCase__ , lowerCAmelCase__ , split_batches=lowerCAmelCase__ , even_batches=lowerCAmelCase__ )
def snake_case__ ( self : str ) -> Dict:
'''simple docstring'''
_UpperCamelCase = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11], [12, 13]]
_UpperCamelCase = [BatchSamplerShard(lowerCAmelCase__ , 2 , lowerCAmelCase__ , even_batches=lowerCAmelCase__ ) for i in range(2 )]
self.assertEqual(len(batch_sampler_shards[0] ) , 3 )
self.assertEqual(len(batch_sampler_shards[1] ) , 2 )
self.assertListEqual(list(batch_sampler_shards[0] ) , [[0, 1, 2], [5, 6, 7, 8], [12, 13]] )
self.assertListEqual(list(batch_sampler_shards[1] ) , [[3, 4], [9, 10, 11]] )
def snake_case__ ( self : str , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : str , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : List[str]=False , lowerCAmelCase__ : str=2 , lowerCAmelCase__ : Optional[int]=False ) -> Tuple:
'''simple docstring'''
random.seed(lowerCAmelCase__ )
_UpperCamelCase = list(lowerCAmelCase__ )
_UpperCamelCase = [
IterableDatasetShard(
lowerCAmelCase__ , batch_size=lowerCAmelCase__ , drop_last=lowerCAmelCase__ , num_processes=lowerCAmelCase__ , process_index=lowerCAmelCase__ , split_batches=lowerCAmelCase__ , )
for i in range(lowerCAmelCase__ )
]
_UpperCamelCase = []
for iterable_dataset_shard in iterable_dataset_shards:
# Since our random iterable dataset will be... random... we need to use a seed to get reproducible results.
random.seed(lowerCAmelCase__ )
iterable_dataset_lists.append(list(lowerCAmelCase__ ) )
_UpperCamelCase = batch_size // num_processes if split_batches else batch_size
# All iterable dataset shard should have the same length, a round multiple of shard_batch_size
_UpperCamelCase = iterable_dataset_lists[0]
for l in iterable_dataset_lists[1:]:
self.assertEqual(len(lowerCAmelCase__ ) , len(lowerCAmelCase__ ) )
self.assertTrue(len(lowerCAmelCase__ ) % shard_batch_size == 0 )
_UpperCamelCase = []
for idx in range(0 , len(lowerCAmelCase__ ) , lowerCAmelCase__ ):
for l in iterable_dataset_lists:
observed += l[idx : idx + shard_batch_size]
if not drop_last:
while len(lowerCAmelCase__ ) < len(lowerCAmelCase__ ):
reference += reference
self.assertListEqual(lowerCAmelCase__ , reference[: len(lowerCAmelCase__ )] )
def snake_case__ ( self : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
_UpperCamelCase = 42
_UpperCamelCase = RandomIterableDataset()
self.check_iterable_dataset_shards(lowerCAmelCase__ , lowerCAmelCase__ , batch_size=4 , drop_last=lowerCAmelCase__ , split_batches=lowerCAmelCase__ )
self.check_iterable_dataset_shards(lowerCAmelCase__ , lowerCAmelCase__ , batch_size=4 , drop_last=lowerCAmelCase__ , split_batches=lowerCAmelCase__ )
self.check_iterable_dataset_shards(lowerCAmelCase__ , lowerCAmelCase__ , batch_size=4 , drop_last=lowerCAmelCase__ , split_batches=lowerCAmelCase__ )
self.check_iterable_dataset_shards(lowerCAmelCase__ , lowerCAmelCase__ , batch_size=4 , drop_last=lowerCAmelCase__ , split_batches=lowerCAmelCase__ )
# Edge case with a very small dataset
_UpperCamelCase = RandomIterableDataset(max_length=2 )
self.check_iterable_dataset_shards(lowerCAmelCase__ , lowerCAmelCase__ , batch_size=4 , drop_last=lowerCAmelCase__ , split_batches=lowerCAmelCase__ )
self.check_iterable_dataset_shards(lowerCAmelCase__ , lowerCAmelCase__ , batch_size=4 , drop_last=lowerCAmelCase__ , split_batches=lowerCAmelCase__ )
self.check_iterable_dataset_shards(lowerCAmelCase__ , lowerCAmelCase__ , batch_size=4 , drop_last=lowerCAmelCase__ , split_batches=lowerCAmelCase__ )
self.check_iterable_dataset_shards(lowerCAmelCase__ , lowerCAmelCase__ , batch_size=4 , drop_last=lowerCAmelCase__ , split_batches=lowerCAmelCase__ )
def snake_case__ ( self : Any ) -> Dict:
'''simple docstring'''
_UpperCamelCase = BatchSampler(range(16 ) , batch_size=4 , drop_last=lowerCAmelCase__ )
_UpperCamelCase = SkipBatchSampler(lowerCAmelCase__ , 2 )
self.assertListEqual(list(lowerCAmelCase__ ) , [[8, 9, 10, 11], [12, 13, 14, 15]] )
def snake_case__ ( self : Optional[int] ) -> Optional[int]:
'''simple docstring'''
_UpperCamelCase = SkipDataLoader(list(range(16 ) ) , batch_size=4 , skip_batches=2 )
self.assertListEqual([t.tolist() for t in dataloader] , [[8, 9, 10, 11], [12, 13, 14, 15]] )
def snake_case__ ( self : Optional[int] ) -> List[Any]:
'''simple docstring'''
_UpperCamelCase = DataLoader(list(range(16 ) ) , batch_size=4 )
_UpperCamelCase = skip_first_batches(lowerCAmelCase__ , num_batches=2 )
self.assertListEqual([t.tolist() for t in new_dataloader] , [[8, 9, 10, 11], [12, 13, 14, 15]] )
def snake_case__ ( self : str ) -> Tuple:
'''simple docstring'''
_UpperCamelCase = DataLoaderShard(list(range(16 ) ) , batch_size=4 )
for idx, _ in enumerate(lowerCAmelCase__ ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
# Test it also works on the second iteration
for idx, _ in enumerate(lowerCAmelCase__ ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
def snake_case__ ( self : int ) -> Dict:
'''simple docstring'''
Accelerator()
_UpperCamelCase = DataLoaderDispatcher(range(16 ) , batch_size=4 )
for idx, _ in enumerate(lowerCAmelCase__ ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
# Test it also works on the second iteration
for idx, _ in enumerate(lowerCAmelCase__ ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
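
# The resume-from-checkpoint pattern exercised by the tests above, in
# application form (an illustrative sketch; `train_dataloader` and
# `batches_already_seen` are hypothetical placeholders):
#
#   skipped_dataloader = skip_first_batches(train_dataloader, batches_already_seen)
#   for batch in skipped_dataloader:
#       ...  # training resumes mid-epoch without replaying consumed batches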
import math


def insertion_sort(array: list, start: int = 0, end: int = 0) -> list:
    """Sort ``array[start:end]`` in place with insertion sort."""
    end = end or len(array)
    for i in range(start, end):
        temp_index = i
        temp_index_value = array[i]
        while temp_index != start and temp_index_value < array[temp_index - 1]:
            array[temp_index] = array[temp_index - 1]
            temp_index -= 1
        array[temp_index] = temp_index_value
    return array
def heapify(array: list, index: int, heap_size: int) -> None:  # Max Heap
    """Sift ``array[index]`` down so the subtree rooted at ``index`` is a max heap."""
    largest = index
    left_index = 2 * index + 1  # Left Node
    right_index = 2 * index + 2  # Right Node
    if left_index < heap_size and array[largest] < array[left_index]:
        largest = left_index
    if right_index < heap_size and array[largest] < array[right_index]:
        largest = right_index
    if largest != index:
        array[index], array[largest] = array[largest], array[index]
        heapify(array, largest, heap_size)


def heap_sort(array: list) -> list:
    """Sort ``array`` in place with heap sort."""
    n = len(array)
    for i in range(n // 2, -1, -1):
        heapify(array, i, n)
    for i in range(n - 1, 0, -1):
        array[i], array[0] = array[0], array[i]
        heapify(array, 0, i)
    return array
def median_of_3(array: list, first_index: int, middle_index: int, last_index: int) -> int:
    """Return the median of the three candidate pivot values."""
    if (array[first_index] > array[middle_index]) != (
        array[first_index] > array[last_index]
    ):
        return array[first_index]
    elif (array[middle_index] > array[first_index]) != (
        array[middle_index] > array[last_index]
    ):
        return array[middle_index]
    else:
        return array[last_index]


def partition(array: list, low: int, high: int, pivot: int) -> int:
    """Hoare-style partition of ``array[low:high]`` around ``pivot``."""
    i = low
    j = high
    while True:
        while array[i] < pivot:
            i += 1
        j -= 1
        while pivot < array[j]:
            j -= 1
        if i >= j:
            return i
        array[i], array[j] = array[j], array[i]
        i += 1
def sort(array: list) -> list:
    """Introsort entry point: quicksort that falls back to heap sort past a depth limit."""
    if len(array) == 0:
        return array
    max_depth = 2 * math.ceil(math.log2(len(array)))
    size_threshold = 16
    return intro_sort(array, 0, len(array), size_threshold, max_depth)


def intro_sort(array: list, start: int, end: int, size_threshold: int, max_depth: int) -> list:
    while end - start > size_threshold:
        if max_depth == 0:
            return heap_sort(array)
        max_depth -= 1
        pivot = median_of_3(array, start, start + ((end - start) // 2) + 1, end - 1)
        p = partition(array, start, end, pivot)
        intro_sort(array, p, end, size_threshold, max_depth)
        end = p
    return insertion_sort(array, start, end)
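
# A quick self-check of the hybrid strategy above (an illustrative sketch, not
# part of the original module; `_check_intro_sort` is a hypothetical helper):
# introsort must agree with Python's built-in sorted() on random input.
def _check_intro_sort(n: int = 1_000) -> None:
    import random

    data = [random.randint(-100, 100) for _ in range(n)]
    assert sort(list(data)) == sorted(data)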
if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_input = input("Enter numbers separated by a comma : ").strip()
    unsorted = [float(item) for item in user_input.split(",")]
    print(sort(unsorted))
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import DebertaVaConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDebertaVaForMaskedLM,
TFDebertaVaForQuestionAnswering,
TFDebertaVaForSequenceClassification,
TFDebertaVaForTokenClassification,
TFDebertaVaModel,
)
class __UpperCamelCase :
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__=13 , lowerCAmelCase__=7 , lowerCAmelCase__=True , lowerCAmelCase__=True , lowerCAmelCase__=True , lowerCAmelCase__=True , lowerCAmelCase__=99 , lowerCAmelCase__=32 , lowerCAmelCase__=2 , lowerCAmelCase__=4 , lowerCAmelCase__=37 , lowerCAmelCase__="gelu" , lowerCAmelCase__=0.1 , lowerCAmelCase__=0.1 , lowerCAmelCase__=512 , lowerCAmelCase__=16 , lowerCAmelCase__=2 , lowerCAmelCase__=0.02 , lowerCAmelCase__=False , lowerCAmelCase__=True , lowerCAmelCase__="None" , lowerCAmelCase__=3 , lowerCAmelCase__=4 , lowerCAmelCase__=None , ) -> Optional[Any]:
a : List[str] = parent
a : int = batch_size
a : Optional[int] = seq_length
a : List[str] = is_training
a : Optional[int] = use_input_mask
a : Optional[Any] = use_token_type_ids
a : List[str] = use_labels
a : List[Any] = vocab_size
a : List[Any] = hidden_size
a : int = num_hidden_layers
a : str = num_attention_heads
a : Tuple = intermediate_size
a : Tuple = hidden_act
a : List[str] = hidden_dropout_prob
a : Optional[int] = attention_probs_dropout_prob
a : List[Any] = max_position_embeddings
a : Optional[Any] = type_vocab_size
a : Optional[Any] = type_sequence_label_size
a : Optional[Any] = initializer_range
a : int = num_labels
a : str = num_choices
a : Optional[int] = relative_attention
a : Optional[Any] = position_biased_input
a : Dict = pos_att_type
a : Tuple = scope
def __a ( self ) -> Dict:
a : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
a : Optional[int] = None
if self.use_input_mask:
a : int = random_attention_mask([self.batch_size, self.seq_length] )
a : Union[str, Any] = None
if self.use_token_type_ids:
a : Dict = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
a : str = None
a : int = None
a : List[str] = None
if self.use_labels:
a : str = ids_tensor([self.batch_size] , self.type_sequence_label_size )
a : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
a : Dict = DebertaVaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , initializer_range=self.initializer_range , return_dict=lowerCAmelCase__ , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Dict:
a : List[str] = TFDebertaVaModel(config=lowerCAmelCase__ )
a : Dict = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
a : Any = [input_ids, input_mask]
a : str = model(lowerCAmelCase__ )
a : Tuple = model(lowerCAmelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Optional[int]:
a : Union[str, Any] = TFDebertaVaForMaskedLM(config=lowerCAmelCase__ )
a : Optional[Any] = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
a : Dict = model(lowerCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Dict:
a : int = self.num_labels
a : Optional[int] = TFDebertaVaForSequenceClassification(config=lowerCAmelCase__ )
a : Any = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
a : List[str] = model(lowerCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Tuple:
a : Optional[int] = self.num_labels
a : int = TFDebertaVaForTokenClassification(config=lowerCAmelCase__ )
a : Optional[Any] = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
a : List[Any] = model(lowerCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Optional[Any]:
a : Union[str, Any] = TFDebertaVaForQuestionAnswering(config=lowerCAmelCase__ )
a : Optional[Any] = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
a : str = model(lowerCAmelCase__ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __a ( self ) -> Tuple:
a : Dict = self.prepare_config_and_inputs()
(
(
a
), (
a
), (
a
), (
a
), (
a
), (
a
), (
a
),
) : Tuple = config_and_inputs
a : Dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_tf
class __UpperCamelCase ( a__ , a__ , unittest.TestCase ):
lowerCamelCase : Union[str, Any] =(
(
TFDebertaVaModel,
TFDebertaVaForMaskedLM,
TFDebertaVaForQuestionAnswering,
TFDebertaVaForSequenceClassification,
TFDebertaVaForTokenClassification,
)
if is_tf_available()
else ()
)
lowerCamelCase : List[str] =(
{
"""feature-extraction""": TFDebertaVaModel,
"""fill-mask""": TFDebertaVaForMaskedLM,
"""question-answering""": TFDebertaVaForQuestionAnswering,
"""text-classification""": TFDebertaVaForSequenceClassification,
"""token-classification""": TFDebertaVaForTokenClassification,
"""zero-shot""": TFDebertaVaForSequenceClassification,
}
if is_tf_available()
else {}
)
lowerCamelCase : Union[str, Any] =False
lowerCamelCase : Dict =False
def __a ( self ) -> List[Any]:
a : str = TFDebertaVaModelTester(self )
a : Dict = ConfigTester(self , config_class=lowerCAmelCase__ , hidden_size=37 )
def __a ( self ) -> int:
self.config_tester.run_common_tests()
def __a ( self ) -> Tuple:
a : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase__ )
def __a ( self ) -> str:
a : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*lowerCAmelCase__ )
def __a ( self ) -> List[Any]:
a : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*lowerCAmelCase__ )
def __a ( self ) -> str:
a : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*lowerCAmelCase__ )
def __a ( self ) -> Any:
a : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowerCAmelCase__ )
@slow
def __a ( self ) -> List[str]:
a : str = TFDebertaVaModel.from_pretrained("kamalkraj/deberta-v2-xlarge" )
self.assertIsNotNone(lowerCAmelCase__ )
@require_tf
class __UpperCamelCase ( unittest.TestCase ):
@unittest.skip(reason="Model not available yet" )
def __a ( self ) -> int:
pass
@slow
def __a ( self ) -> Optional[int]:
a : int = TFDebertaVaModel.from_pretrained("kamalkraj/deberta-v2-xlarge" )
a : Union[str, Any] = tf.constant([[0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2]] )
a : str = tf.constant([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
a : str = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ )[0]
a : Any = tf.constant(
[[[0.2_356, 0.1_948, 0.0_369], [-0.1_063, 0.3_586, -0.5_152], [-0.6_399, -0.0_259, -0.2_525]]] )
tf.debugging.assert_near(output[:, 1:4, 1:4] , lowerCAmelCase__ , atol=1E-4 )
"""simple docstring"""
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_flax_cross_test,
require_flax,
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import is_flax_available, is_torch_available, is_vision_available
from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_flax_bert import FlaxBertModelTester
from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester
from ..vit.test_modeling_flax_vit import FlaxViTModelTester
if is_flax_available():
from transformers import (
FlaxBertModel,
FlaxCLIPVisionModel,
FlaxVisionTextDualEncoderModel,
FlaxViTModel,
VisionTextDualEncoderConfig,
VisionTextDualEncoderProcessor,
)
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
if is_torch_available():
import torch
from transformers import VisionTextDualEncoderModel
if is_vision_available():
from PIL import Image
def to_atuple(x):
    """Return ``x`` unchanged if it is already iterable, else duplicate it into a pair."""
    if isinstance(x, collections.abc.Iterable):
        return x
    return (x, x)
@require_flax
class __UpperCamelCase :
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ ) -> str:
pass
def __a ( self ) -> List[Any]:
pass
def __a ( self ) -> str:
pass
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Any:
a : Dict = np.abs((a - b) ).max()
self.assertLessEqual(lowerCAmelCase__ , lowerCAmelCase__ , f"""Difference between torch and flax is {diff} (>= {tol}).""" )
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=None , **lowerCAmelCase__ ) -> Dict:
a : Dict = VisionTextDualEncoderConfig.from_vision_text_configs(lowerCAmelCase__ , lowerCAmelCase__ )
a : List[str] = FlaxVisionTextDualEncoderModel(lowerCAmelCase__ )
a : int = model(input_ids=lowerCAmelCase__ , pixel_values=lowerCAmelCase__ , attention_mask=lowerCAmelCase__ )
self.assertEqual(output["text_embeds"].shape , (input_ids.shape[0], config.projection_dim) )
self.assertEqual(output["image_embeds"].shape , (pixel_values.shape[0], config.projection_dim) )
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=None , **lowerCAmelCase__ ) -> Optional[Any]:
a, a : Optional[int] = self.get_vision_text_model(lowerCAmelCase__ , lowerCAmelCase__ )
a : Dict = {"vision_model": vision_model, "text_model": text_model}
a : Any = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**lowerCAmelCase__ )
a : List[str] = model(input_ids=lowerCAmelCase__ , pixel_values=lowerCAmelCase__ , attention_mask=lowerCAmelCase__ )
self.assertEqual(output["text_embeds"].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output["image_embeds"].shape , (pixel_values.shape[0], model.config.projection_dim) )
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=None , **lowerCAmelCase__ ) -> Union[str, Any]:
a, a : Dict = self.get_vision_text_model(lowerCAmelCase__ , lowerCAmelCase__ )
a : Tuple = {"vision_model": vision_model, "text_model": text_model}
a : Tuple = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**lowerCAmelCase__ )
a : List[str] = model(input_ids=lowerCAmelCase__ , pixel_values=lowerCAmelCase__ , attention_mask=lowerCAmelCase__ )
a : Any = output[0]
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(lowerCAmelCase__ )
a : str = FlaxVisionTextDualEncoderModel.from_pretrained(lowerCAmelCase__ )
a : Dict = model(input_ids=lowerCAmelCase__ , pixel_values=lowerCAmelCase__ , attention_mask=lowerCAmelCase__ )
a : List[Any] = after_output[0]
a : Optional[Any] = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(lowerCAmelCase__ , 1E-3 )
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=None , **lowerCAmelCase__ ) -> List[Any]:
a, a : Union[str, Any] = self.get_vision_text_model(lowerCAmelCase__ , lowerCAmelCase__ )
a : List[Any] = {"vision_model": vision_model, "text_model": text_model}
a : int = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**lowerCAmelCase__ )
a : Tuple = model(
input_ids=lowerCAmelCase__ , pixel_values=lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , output_attentions=lowerCAmelCase__ )
a : int = output.vision_model_output.attentions
self.assertEqual(len(lowerCAmelCase__ ) , vision_config.num_hidden_layers )
# in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
a : Optional[int] = to_atuple(vision_model.config.image_size )
a : Tuple = to_atuple(vision_model.config.patch_size )
a : Any = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
a : Dict = num_patches + 1
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
a : str = output.text_model_output.attentions
self.assertEqual(len(lowerCAmelCase__ ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Optional[Any]:
pt_model.to(lowerCAmelCase__ )
pt_model.eval()
# prepare inputs
a : List[Any] = inputs_dict
a : Any = {k: torch.tensor(v.tolist() ) for k, v in flax_inputs.items()}
with torch.no_grad():
a : int = pt_model(**lowerCAmelCase__ ).to_tuple()
a : Union[str, Any] = fx_model(**lowerCAmelCase__ ).to_tuple()
self.assertEqual(len(lowerCAmelCase__ ) , len(lowerCAmelCase__ ) , "Output lengths differ between Flax and PyTorch" )
for fx_output, pt_output in zip(fx_outputs[:4] , pt_outputs[:4] ):
self.assert_almost_equals(lowerCAmelCase__ , pt_output.numpy() , 4E-2 )
# PT -> Flax
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(lowerCAmelCase__ )
a : Dict = FlaxVisionTextDualEncoderModel.from_pretrained(lowerCAmelCase__ , from_pt=lowerCAmelCase__ )
a : Optional[int] = fx_model_loaded(**lowerCAmelCase__ ).to_tuple()
self.assertEqual(len(lowerCAmelCase__ ) , len(lowerCAmelCase__ ) , "Output lengths differ between Flax and PyTorch" )
for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4] , pt_outputs[:4] ):
self.assert_almost_equals(lowerCAmelCase__ , pt_output.numpy() , 4E-2 )
# Flax -> PT
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(lowerCAmelCase__ )
a : Optional[int] = VisionTextDualEncoderModel.from_pretrained(lowerCAmelCase__ , from_flax=lowerCAmelCase__ )
pt_model_loaded.to(lowerCAmelCase__ )
pt_model_loaded.eval()
with torch.no_grad():
a : int = pt_model_loaded(**lowerCAmelCase__ ).to_tuple()
self.assertEqual(len(lowerCAmelCase__ ) , len(lowerCAmelCase__ ) , "Output lengths differ between Flax and PyTorch" )
for fx_output, pt_output_loaded in zip(fx_outputs[:4] , pt_outputs_loaded[:4] ):
self.assert_almost_equals(lowerCAmelCase__ , pt_output_loaded.numpy() , 4E-2 )
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> List[Any]:
a : List[Any] = VisionTextDualEncoderConfig.from_vision_text_configs(lowerCAmelCase__ , lowerCAmelCase__ )
a : Dict = VisionTextDualEncoderModel(lowerCAmelCase__ )
a : Optional[int] = FlaxVisionTextDualEncoderModel(lowerCAmelCase__ )
a : Dict = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , lowerCAmelCase__ )
a : List[str] = fx_state
self.check_pt_flax_equivalence(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Optional[int]:
a : Optional[int] = VisionTextDualEncoderConfig.from_vision_text_configs(lowerCAmelCase__ , lowerCAmelCase__ )
a : Optional[int] = VisionTextDualEncoderModel(lowerCAmelCase__ )
a : List[Any] = FlaxVisionTextDualEncoderModel(lowerCAmelCase__ )
a : int = load_flax_weights_in_pytorch_model(lowerCAmelCase__ , fx_model.params )
self.check_pt_flax_equivalence(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
def __a ( self ) -> Dict:
a : Any = self.prepare_config_and_inputs()
self.check_model_from_pretrained_configs(**lowerCAmelCase__ )
def __a ( self ) -> Dict:
a : List[str] = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_from_pretrained(**lowerCAmelCase__ )
def __a ( self ) -> List[str]:
a : int = self.prepare_config_and_inputs()
self.check_save_load(**lowerCAmelCase__ )
def __a ( self ) -> List[str]:
a : Tuple = self.prepare_config_and_inputs()
self.check_vision_text_output_attention(**lowerCAmelCase__ )
@is_pt_flax_cross_test
def __a ( self ) -> Any:
a : List[Any] = self.prepare_config_and_inputs()
a : Tuple = config_inputs_dict.pop("vision_config" )
a : int = config_inputs_dict.pop("text_config" )
a : List[str] = config_inputs_dict
self.check_equivalence_pt_to_flax(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
self.check_equivalence_flax_to_pt(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
@slow
def __a ( self ) -> List[Any]:
a, a : Optional[int] = self.get_pretrained_model_and_inputs()
a : Optional[int] = model_a(**lowerCAmelCase__ )
a : Optional[int] = outputs[0]
with tempfile.TemporaryDirectory() as tmp_dirname:
model_a.save_pretrained(lowerCAmelCase__ )
a : Any = FlaxVisionTextDualEncoderModel.from_pretrained(lowerCAmelCase__ )
a : str = model_a(**lowerCAmelCase__ )
a : Dict = after_outputs[0]
a : Optional[Any] = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(lowerCAmelCase__ , 1E-5 )
@require_flax
class __UpperCamelCase ( a__ , unittest.TestCase ):
def __a ( self ) -> List[Any]:
a : str = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
"hf-internal-testing/tiny-random-vit" , "hf-internal-testing/tiny-bert" , vision_from_pt=lowerCAmelCase__ , text_from_pt=lowerCAmelCase__ , )
a : Any = 13
a : str = floats_tensor(
[
batch_size,
model.config.vision_config.num_channels,
model.config.vision_config.image_size,
model.config.vision_config.image_size,
] )
a : str = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size )
a : Optional[Any] = random_attention_mask([batch_size, 4] )
a : Optional[Any] = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
return model, inputs
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ ) -> Dict:
a : Dict = FlaxViTModel(lowerCAmelCase__ )
a : Dict = FlaxBertModel(lowerCAmelCase__ )
return vision_model, text_model
def __a ( self ) -> str:
a : Union[str, Any] = FlaxViTModelTester(self )
a : Dict = FlaxBertModelTester(self )
a : str = vit_model_tester.prepare_config_and_inputs()
a : Any = bert_model_tester.prepare_config_and_inputs()
a, a : Optional[int] = vision_config_and_inputs
a, a, a, a : Dict = text_config_and_inputs
# make sure that cross attention layers are added
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": attention_mask,
"input_ids": input_ids,
"token_type_ids": token_type_ids,
}
@require_torch
class __UpperCamelCase ( a__ , unittest.TestCase ):
def __a ( self ) -> List[Any]:
a : str = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
"hf-internal-testing/tiny-random-clip" , "hf-internal-testing/tiny-bert" , vision_from_pt=lowerCAmelCase__ , text_from_pt=lowerCAmelCase__ , )
a : Tuple = 13
a : Union[str, Any] = floats_tensor(
[
batch_size,
model.config.vision_config.num_channels,
model.config.vision_config.image_size,
model.config.vision_config.image_size,
] )
a : Union[str, Any] = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size )
a : Tuple = random_attention_mask([batch_size, 4] )
a : str = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
return model, inputs
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ ) -> Tuple:
a : List[Any] = FlaxCLIPVisionModel(lowerCAmelCase__ )
a : Tuple = FlaxBertModel(lowerCAmelCase__ )
return vision_model, text_model
def __a ( self ) -> List[Any]:
a : Tuple = FlaxCLIPVisionModelTester(self )
a : Union[str, Any] = FlaxBertModelTester(self )
a : Dict = clip_model_tester.prepare_config_and_inputs()
a : Optional[int] = bert_model_tester.prepare_config_and_inputs()
a, a : Dict = vision_config_and_inputs
a, a, a, a : Union[str, Any] = text_config_and_inputs
# make sure that cross attention layers are added
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": attention_mask,
"input_ids": input_ids,
"token_type_ids": token_type_ids,
}
@require_flax
@require_vision
class __UpperCamelCase ( unittest.TestCase ):
@slow
def __a ( self ) -> Dict:
a : str = FlaxVisionTextDualEncoderModel.from_pretrained("clip-italian/clip-italian" , logit_scale_init_value=1.0 )
a : Optional[Any] = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian" )
a : Any = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
a : Optional[int] = processor(
text=["una foto di un gatto", "una foto di un cane"] , images=lowerCAmelCase__ , padding=lowerCAmelCase__ , return_tensors="np" )
a : Optional[Any] = model(**lowerCAmelCase__ )
# verify the logits
self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) )
self.assertEqual(
outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , )
a : List[str] = np.array([[1.2_284_727, 0.3_104_122]] )
self.assertTrue(np.allclose(outputs.logits_per_image , lowerCAmelCase__ , atol=1E-3 ) )
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters(model):
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum([np.prod(p.size()) for p in model_parameters])
    return params


logger = logging.getLogger(__name__)


def get_checkpoint_callback(output_dir, metric):
    """Save the top checkpoints ranked by the validation metric."""
    if metric == "rouge2":
        exp = "{val_avg_rouge2:.4f}-{step_count}"
    elif metric == "bleu":
        exp = "{val_avg_bleu:.4f}-{step_count}"
    elif metric == "em":
        exp = "{val_avg_em:.4f}-{step_count}"
    else:
        raise NotImplementedError(
            f"seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"
            " function."
        )

    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir,
        filename=exp,
        monitor=f"val_{metric}",
        mode="max",
        save_top_k=3,
        every_n_epochs=1,
    )
    return checkpoint_callback


def get_early_stopping_callback(metric, patience):
    return EarlyStopping(
        monitor=f"val_{metric}",
        mode="min" if "loss" in metric else "max",
        patience=patience,
        verbose=True,
    )
class Seq2SeqLoggingCallback(pl.Callback):
    def on_batch_end(self, trainer, pl_module):
        lrs = {f"lr_group_{i}": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
        pl_module.logger.log_metrics(lrs)

    @rank_zero_only
    def _write_logs(
        self, trainer: pl.Trainer, pl_module: pl.LightningModule, type_path: str, save_generations=True
    ) -> None:
        logger.info(f"***** {type_path} results at step {trainer.global_step:05d} *****")
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]})
        # Log results
        od = Path(pl_module.hparams.output_dir)
        if type_path == "test":
            results_file = od / "test_results.txt"
            generations_file = od / "test_generations.txt"
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / f"{type_path}_results/{trainer.global_step:05d}.txt"
            generations_file = od / f"{type_path}_generations/{trainer.global_step:05d}.txt"
            results_file.parent.mkdir(exist_ok=True)
            generations_file.parent.mkdir(exist_ok=True)
        with open(results_file, "a+") as writer:
            for key in sorted(metrics):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val, torch.Tensor):
                    val = val.item()
                msg = f"{key}: {val:.6f}\n"
                writer.write(msg)

        if not save_generations:
            return

        if "preds" in metrics:
            content = "\n".join(metrics["preds"])
            generations_file.open("w+").write(content)

    @rank_zero_only
    def on_train_start(self, trainer, pl_module):
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()

        n_trainable_pars = count_trainable_parameters(pl_module)
        # mp stands for million parameters
        trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6})

    @rank_zero_only
    def on_test_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        return self._write_logs(trainer, pl_module, "test")

    @rank_zero_only
    def on_validation_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        # Uncommenting this will save val generations
        # return self._write_logs(trainer, pl_module, "valid")
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_jukebox""": [
"""JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""JukeboxConfig""",
"""JukeboxPriorConfig""",
"""JukeboxVQVAEConfig""",
],
"""tokenization_jukebox""": ["""JukeboxTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_jukebox"] = [
"""JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""JukeboxModel""",
"""JukeboxPreTrainedModel""",
"""JukeboxVQVAE""",
"""JukeboxPrior""",
]
if TYPE_CHECKING:
from .configuration_jukebox import (
JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP,
JukeboxConfig,
JukeboxPriorConfig,
JukeboxVQVAEConfig,
)
from .tokenization_jukebox import JukeboxTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_jukebox import (
JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST,
JukeboxModel,
JukeboxPreTrainedModel,
JukeboxPrior,
JukeboxVQVAE,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
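
# Consumer-side effect of the lazy module above (an illustrative sketch):
# importing the package stays cheap because the torch-heavy submodules are
# only imported when an attribute is first accessed.
#
#   from transformers.models import jukebox   # no torch import happens here
#   config = jukebox.JukeboxConfig()          # first access triggers the real import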
"""simple docstring"""
import argparse
import os
import sys
from unittest.mock import patch
import pytorch_lightning as pl
import timeout_decorator
import torch
from distillation import SummarizationDistiller, distill_main
from finetune import SummarizationModule, main
from transformers import MarianMTModel
from transformers.file_utils import cached_path
from transformers.testing_utils import TestCasePlus, require_torch_gpu, slow
from utils import load_json
MARIAN_MODEL = "sshleifer/mar_enro_6_3_student"
class _lowerCamelCase ( _lowercase ):
def snake_case_ (self ) -> Tuple:
super().setUp()
        data_cached = cached_path(
            "https://cdn-datasets.huggingface.co/translation/wmt_en_ro-tr40k-va0.5k-te0.5k.tar.gz",
            extract_compressed_file=True,
        )
        self.data_dir = f"{data_cached}/wmt_en_ro-tr40k-va0.5k-te0.5k"
@slow
@require_torch_gpu
def snake_case_ (self ) -> Dict:
MarianMTModel.from_pretrained(__a )
@slow
@require_torch_gpu
def snake_case_ (self ) -> Optional[Any]:
UpperCamelCase = {
"$MAX_LEN": 64,
"$BS": 64,
"$GAS": 1,
"$ENRO_DIR": self.data_dir,
"facebook/mbart-large-cc25": MARIAN_MODEL,
# "val_check_interval=0.25": "val_check_interval=1.0",
"--learning_rate=3e-5": "--learning_rate 3e-4",
"--num_train_epochs 6": "--num_train_epochs 1",
}
# Clean up bash script
UpperCamelCase = (self.test_file_dir / "train_mbart_cc25_enro.sh").open().read().split("finetune.py" )[1].strip()
UpperCamelCase = bash_script.replace("\\\n" , "" ).strip().replace("\"$@\"" , "" )
for k, v in env_vars_to_replace.items():
UpperCamelCase = bash_script.replace(__a , str(__a ) )
UpperCamelCase = self.get_auto_remove_tmp_dir()
# bash_script = bash_script.replace("--fp16 ", "")
UpperCamelCase = F"\n --output_dir {output_dir}\n --tokenizer_name Helsinki-NLP/opus-mt-en-ro\n --sortish_sampler\n --do_predict\n --gpus 1\n --freeze_encoder\n --n_train 40000\n --n_val 500\n --n_test 500\n --fp16_opt_level O1\n --num_sanity_val_steps 0\n --eval_beams 2\n ".split()
# XXX: args.gpus > 1 : handle multi_gpu in the future
UpperCamelCase = ["finetune.py"] + bash_script.split() + args
with patch.object(__a , "argv" , __a ):
UpperCamelCase = argparse.ArgumentParser()
UpperCamelCase = pl.Trainer.add_argparse_args(__a )
UpperCamelCase = SummarizationModule.add_model_specific_args(__a , os.getcwd() )
UpperCamelCase = parser.parse_args()
UpperCamelCase = main(__a )
# Check metrics
UpperCamelCase = load_json(model.metrics_save_path )
UpperCamelCase = metrics["val"][0]
UpperCamelCase = metrics["val"][-1]
self.assertEqual(len(metrics["val"] ) , (args.max_epochs / args.val_check_interval) )
assert isinstance(last_step_stats[F"val_avg_{model.val_metric}"] , __a )
self.assertGreater(last_step_stats["val_avg_gen_time"] , 0.01 )
# model hanging on generate. Maybe bad config was saved. (XXX: old comment/assert?)
self.assertLessEqual(last_step_stats["val_avg_gen_time"] , 1.0 )
# test learning requirements:
# 1. BLEU improves over the course of training by more than 2 pts
self.assertGreater(last_step_stats["val_avg_bleu"] - first_step_stats["val_avg_bleu"] , 2 )
# 2. BLEU finishes above 17
self.assertGreater(last_step_stats["val_avg_bleu"] , 17 )
# 3. test BLEU and val BLEU within ~1.1 pt.
self.assertLess(abs(metrics["val"][-1]["val_avg_bleu"] - metrics["test"][-1]["test_avg_bleu"] ) , 1.1 )
# check lightning ckpt can be loaded and has a reasonable statedict
UpperCamelCase = os.listdir(__a )
UpperCamelCase = [x for x in contents if x.endswith(".ckpt" )][0]
UpperCamelCase = os.path.join(args.output_dir , __a )
UpperCamelCase = torch.load(__a , map_location="cpu" )
UpperCamelCase = "model.model.decoder.layers.0.encoder_attn_layer_norm.weight"
assert expected_key in ckpt["state_dict"]
        assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.float32
# TODO: turn on args.do_predict when PL bug fixed.
if args.do_predict:
UpperCamelCase = {os.path.basename(__a ) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics["test"] ) == 1
class _lowerCamelCase ( _lowercase ):
@timeout_decorator.timeout(6_00 )
@slow
@require_torch_gpu
def snake_case_ (self ) -> List[str]:
UpperCamelCase = F"{self.test_file_dir_str}/test_data/wmt_en_ro"
UpperCamelCase = {
"--fp16_opt_level=O1": "",
"$MAX_LEN": 1_28,
"$BS": 16,
"$GAS": 1,
"$ENRO_DIR": data_dir,
"$m": "sshleifer/student_marian_en_ro_6_1",
"val_check_interval=0.25": "val_check_interval=1.0",
}
# Clean up bash script
UpperCamelCase = (
(self.test_file_dir / "distil_marian_no_teacher.sh").open().read().split("distillation.py" )[1].strip()
)
UpperCamelCase = bash_script.replace("\\\n" , "" ).strip().replace("\"$@\"" , "" )
UpperCamelCase = bash_script.replace("--fp16 " , " " )
for k, v in env_vars_to_replace.items():
UpperCamelCase = bash_script.replace(__a , str(__a ) )
UpperCamelCase = self.get_auto_remove_tmp_dir()
UpperCamelCase = bash_script.replace("--fp16" , "" )
UpperCamelCase = 6
UpperCamelCase = (
["distillation.py"]
+ bash_script.split()
+ [
F"--output_dir={output_dir}",
"--gpus=1",
"--learning_rate=1e-3",
F"--num_train_epochs={epochs}",
"--warmup_steps=10",
"--val_check_interval=1.0",
"--do_predict",
]
)
with patch.object(__a , "argv" , __a ):
UpperCamelCase = argparse.ArgumentParser()
UpperCamelCase = pl.Trainer.add_argparse_args(__a )
UpperCamelCase = SummarizationDistiller.add_model_specific_args(__a , os.getcwd() )
UpperCamelCase = parser.parse_args()
# assert args.gpus == gpus THIS BREAKS for multi_gpu
UpperCamelCase = distill_main(__a )
# Check metrics
UpperCamelCase = load_json(model.metrics_save_path )
UpperCamelCase = metrics["val"][0]
UpperCamelCase = metrics["val"][-1]
assert len(metrics["val"] ) >= (args.max_epochs / args.val_check_interval) # +1 accounts for val_sanity_check
assert last_step_stats["val_avg_gen_time"] >= 0.01
assert first_step_stats["val_avg_bleu"] < last_step_stats["val_avg_bleu"] # model learned nothing
assert 1.0 >= last_step_stats["val_avg_gen_time"] # model hanging on generate. Maybe bad config was saved.
assert isinstance(last_step_stats[F"val_avg_{model.val_metric}"] , __a )
# check lightning ckpt can be loaded and has a reasonable statedict
UpperCamelCase = os.listdir(__a )
UpperCamelCase = [x for x in contents if x.endswith(".ckpt" )][0]
UpperCamelCase = os.path.join(args.output_dir , __a )
UpperCamelCase = torch.load(__a , map_location="cpu" )
UpperCamelCase = "model.model.decoder.layers.0.encoder_attn_layer_norm.weight"
assert expected_key in ckpt["state_dict"]
        assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.float32
# TODO: turn on args.do_predict when PL bug fixed.
if args.do_predict:
UpperCamelCase = {os.path.basename(__a ) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics["test"] ) == 1
"""simple docstring"""
import math
def a__ ( _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase = 0
UpperCamelCase = 0
while num > 0:
UpperCamelCase = num % 8
UpperCamelCase = octal + (remainder * math.floor(math.pow(10 , _SCREAMING_SNAKE_CASE ) ))
counter += 1
UpperCamelCase = math.floor(num / 8 ) # basically /= 8 without remainder if any
# This formatting removes trailing '.0' from `octal`.
return F"0o{int(_SCREAMING_SNAKE_CASE )}"
def main():
    """Print some example conversions."""
    print("\n2 in octal is:")
    print(decimal_to_octal(2))  # = 2
    print("\n8 in octal is:")
    print(decimal_to_octal(8))  # = 10
    print("\n65 in octal is:")
    print(decimal_to_octal(65))  # = 101
    print("\n216 in octal is:")
    print(decimal_to_octal(216))  # = 330
    print("\n512 in octal is:")
    print(decimal_to_octal(512))  # = 1000
    print("\n")


if __name__ == "__main__":
    main()
'''simple docstring'''
import os
from typing import Dict, List, Union
import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs
from .tokenization_gpt2 import GPT2Tokenizer
class TFGPT2Tokenizer(tf.keras.layers.Layer):
    """In-graph GPT-2 tokenizer backed by keras-nlp's BytePairTokenizer."""

    def __init__(self, vocab: Dict[str, int], merges: List[str], max_length: int = None, pad_token_id: int = None):
        super().__init__()
        self.pad_token_id = pad_token_id
        self.max_length = max_length
        self.vocab = vocab
        self.merges = merges
        self.tf_tokenizer = BytePairTokenizer(vocab, merges, sequence_length=max_length)

    @classmethod
    def from_tokenizer(cls, tokenizer: GPT2Tokenizer, *args, **kwargs):
        """Build from an existing (slow) GPT2Tokenizer instance."""
        merges = [" ".join(m) for m in tokenizer.bpe_ranks.keys()]
        vocab = tokenizer.get_vocab()
        return cls(vocab, merges, *args, **kwargs)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, *init_inputs, **kwargs):
        tokenizer = GPT2Tokenizer.from_pretrained(pretrained_model_name_or_path, *init_inputs, **kwargs)
        return cls.from_tokenizer(tokenizer, *init_inputs, **kwargs)

    @classmethod
    def from_config(cls, config):
        return cls(**config)

    def get_config(self):
        return {
            "vocab": self.vocab,
            "merges": self.merges,
            "max_length": self.max_length,
            "pad_token_id": self.pad_token_id,
        }

    def call(self, x, max_length: int = None):
        input_ids = self.tf_tokenizer(x)
        attention_mask = tf.ones_like(input_ids)

        if self.pad_token_id is not None:
            # pad the tokens up to max length
            max_length = max_length if max_length is not None else self.max_length

            if max_length is not None:
                input_ids, attention_mask = pad_model_inputs(
                    input_ids, max_seq_length=max_length, pad_value=self.pad_token_id
                )

        return {"attention_mask": attention_mask, "input_ids": input_ids}
import json
import logging
import os
import socket
import git
import numpy as np
import torch
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s''',
datefmt='''%m/%d/%Y %H:%M:%S''',
level=logging.INFO,
)
logger = logging.getLogger(__name__)


def git_log(folder_path: str):
    """Dump commit/branch info of the current repo to ``git_log.json``."""
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
    }

    with open(os.path.join(folder_path, "git_log.json"), "w") as f:
        json.dump(repo_infos, f, indent=4)


def init_gpu_params(params):
    """Handle single- and multi-GPU / multi-node setup."""
    if params.n_gpu <= 0:
        params.local_rank = 0
        params.master_port = -1
        params.is_master = True
        params.multi_gpu = False
        return

    assert torch.cuda.is_available()

    logger.info("Initializing GPUs")
    if params.n_gpu > 1:
        assert params.local_rank != -1

        params.world_size = int(os.environ["WORLD_SIZE"])
        params.n_gpu_per_node = int(os.environ["N_GPU_NODE"])
        params.global_rank = int(os.environ["RANK"])

        # number of nodes / node ID
        params.n_nodes = params.world_size // params.n_gpu_per_node
        params.node_id = params.global_rank // params.n_gpu_per_node
        params.multi_gpu = True

        assert params.n_nodes == int(os.environ["N_NODES"])
        assert params.node_id == int(os.environ["NODE_RANK"])

    # local job (single GPU)
    else:
        assert params.local_rank == -1

        params.n_nodes = 1
        params.node_id = 0
        params.local_rank = 0
        params.global_rank = 0
        params.world_size = 1
        params.n_gpu_per_node = 1
        params.multi_gpu = False

    # sanity checks
    assert params.n_nodes >= 1
    assert 0 <= params.node_id < params.n_nodes
    assert 0 <= params.local_rank <= params.global_rank < params.world_size
    assert params.world_size == params.n_nodes * params.n_gpu_per_node

    # define whether this is the master process / if we are in multi-node distributed mode
    params.is_master = params.node_id == 0 and params.local_rank == 0
    params.multi_node = params.n_nodes > 1

    # summary
    PREFIX = f"--- Global rank: {params.global_rank} - "
    logger.info(PREFIX + "Number of nodes: %i" % params.n_nodes)
    logger.info(PREFIX + "Node ID        : %i" % params.node_id)
    logger.info(PREFIX + "Local rank     : %i" % params.local_rank)
    logger.info(PREFIX + "World size     : %i" % params.world_size)
    logger.info(PREFIX + "GPUs per node  : %i" % params.n_gpu_per_node)
    logger.info(PREFIX + "Master         : %s" % str(params.is_master))
    logger.info(PREFIX + "Multi-node     : %s" % str(params.multi_node))
    logger.info(PREFIX + "Multi-GPU      : %s" % str(params.multi_gpu))
    logger.info(PREFIX + "Hostname       : %s" % socket.gethostname())

    # set GPU device
    torch.cuda.set_device(params.local_rank)

    # initialize multi-GPU
    if params.multi_gpu:
        logger.info("Initializing PyTorch distributed")
        torch.distributed.init_process_group(
            init_method="env://",
            backend="nccl",
        )


def set_seed(args):
    """Set the random seed for reproducibility."""
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if args.n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)
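
# Illustrative entry-point use of the helpers above (a sketch; `args` is a
# hypothetical argparse.Namespace carrying at least n_gpu, local_rank, seed
# and a dump_path):
#
#   git_log(args.dump_path)
#   init_gpu_params(args)
#   set_seed(args)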
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class DanceDiffusionPipeline(DiffusionPipeline):
    """Pipeline for unconditional audio generation."""

    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 100,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        audio_length_in_s: Optional[float] = None,
        return_dict: bool = True,
    ) -> Union[AudioPipelineOutput, Tuple]:
        if audio_length_in_s is None:
            audio_length_in_s = self.unet.config.sample_size / self.unet.config.sample_rate

        sample_size = audio_length_in_s * self.unet.config.sample_rate

        down_scale_factor = 2 ** len(self.unet.up_blocks)
        if sample_size < 3 * down_scale_factor:
            raise ValueError(
                f"{audio_length_in_s} is too small. Make sure it's bigger or equal to"
                f" {3 * down_scale_factor / self.unet.config.sample_rate}."
            )

        original_sample_size = int(sample_size)
        if sample_size % down_scale_factor != 0:
            sample_size = (
                (audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
            ) * down_scale_factor
            logger.info(
                f"{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled"
                f" by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising"
                " process."
            )
        sample_size = int(sample_size)

        dtype = next(iter(self.unet.parameters())).dtype
        shape = (batch_size, self.unet.config.in_channels, sample_size)
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        audio = randn_tensor(shape, generator=generator, device=self.device, dtype=dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps, device=audio.device)
        self.scheduler.timesteps = self.scheduler.timesteps.to(dtype)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(audio, t).sample

            # 2. compute previous audio sample: x_t -> x_t-1
            audio = self.scheduler.step(model_output, t, audio).prev_sample

        audio = audio.clamp(-1, 1).float().cpu().numpy()
        audio = audio[:, :, :original_sample_size]

        if not return_dict:
            return (audio,)

        return AudioPipelineOutput(audios=audio)
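
# Minimal usage sketch for the pipeline above (illustrative; assumes the
# public "harmonai/maestro-150k" Dance Diffusion checkpoint):
#
#   pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k")
#   audios = pipe(num_inference_steps=100, audio_length_in_s=4.0).audios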
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""microsoft/swin-tiny-patch4-window7-224""": (
"""https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json"""
),
# See all Swin models at https://huggingface.co/models?filter=swin
}
class SwinConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "swin"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )


class SwinOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
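
# Illustrative check of the derived attribute above (a sketch): with the
# default Swin-Tiny hyper-parameters, hidden_size = embed_dim * 2 ** (num_stages - 1)
# = 96 * 2 ** 3 = 768.
#
#   config = SwinConfig()
#   assert config.hidden_size == 768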
import argparse
import re
import numpy as np
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SamConfig,
SamImageProcessor,
SamModel,
SamProcessor,
SamVisionConfig,
)
KEYS_TO_MODIFY_MAPPING = {
"iou_prediction_head.layers.0": "iou_prediction_head.proj_in",
"iou_prediction_head.layers.1": "iou_prediction_head.layers.0",
"iou_prediction_head.layers.2": "iou_prediction_head.proj_out",
"mask_decoder.output_upscaling.0": "mask_decoder.upscale_conv1",
"mask_decoder.output_upscaling.1": "mask_decoder.upscale_layer_norm",
"mask_decoder.output_upscaling.3": "mask_decoder.upscale_conv2",
"mask_downscaling.0": "mask_embed.conv1",
"mask_downscaling.1": "mask_embed.layer_norm1",
"mask_downscaling.3": "mask_embed.conv2",
"mask_downscaling.4": "mask_embed.layer_norm2",
"mask_downscaling.6": "mask_embed.conv3",
"point_embeddings": "point_embed",
"pe_layer.positional_encoding_gaussian_matrix": "shared_embedding.positional_embedding",
"image_encoder": "vision_encoder",
"neck.0": "neck.conv1",
"neck.1": "neck.layer_norm1",
"neck.2": "neck.conv2",
"neck.3": "neck.layer_norm2",
"patch_embed.proj": "patch_embed.projection",
".norm": ".layer_norm",
"blocks": "layers",
}
def replace_keys(state_dict):
    """Rename original SAM checkpoint keys to the Hugging Face layout."""
    model_state_dict = {}
    state_dict.pop("pixel_mean", None)
    state_dict.pop("pixel_std", None)

    output_hypernetworks_mlps_pattern = r".*.output_hypernetworks_mlps.(\d+).layers.(\d+).*"

    for key, value in state_dict.items():
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify, new_key)

        if re.match(output_hypernetworks_mlps_pattern, key):
            layer_nb = int(re.match(output_hypernetworks_mlps_pattern, key).group(2))
            if layer_nb == 0:
                key = key.replace("layers.0", "proj_in")
            elif layer_nb == 1:
                key = key.replace("layers.1", "layers.0")
            elif layer_nb == 2:
                key = key.replace("layers.2", "proj_out")

        model_state_dict[key] = value

    model_state_dict["shared_image_embedding.positional_embedding"] = model_state_dict[
        "prompt_encoder.shared_embedding.positional_embedding"
    ]

    return model_state_dict
def convert_sam_checkpoint(model_name, pytorch_dump_folder, push_to_hub, model_hub_id="ybelkada/segment-anything"):
    checkpoint_path = hf_hub_download(model_hub_id, f"checkpoints/{model_name}.pth")

    if "sam_vit_b" in model_name:
        config = SamConfig()
    elif "sam_vit_l" in model_name:
        vision_config = SamVisionConfig(
            hidden_size=1024,
            num_hidden_layers=24,
            num_attention_heads=16,
            global_attn_indexes=[5, 11, 17, 23],
        )
        config = SamConfig(
            vision_config=vision_config,
        )
    elif "sam_vit_h" in model_name:
        vision_config = SamVisionConfig(
            hidden_size=1280,
            num_hidden_layers=32,
            num_attention_heads=16,
            global_attn_indexes=[7, 15, 23, 31],
        )
        config = SamConfig(
            vision_config=vision_config,
        )

    state_dict = torch.load(checkpoint_path, map_location="cpu")
    state_dict = replace_keys(state_dict)

    image_processor = SamImageProcessor()
    processor = SamProcessor(image_processor=image_processor)
    hf_model = SamModel(config)

    hf_model.load_state_dict(state_dict)
    hf_model = hf_model.to("cuda")

    img_url = "https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png"
    raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")
    input_points = [[[400, 650]]]
    input_labels = [[1]]

    inputs = processor(images=np.array(raw_image), return_tensors="pt").to("cuda")

    with torch.no_grad():
        output = hf_model(**inputs)
    scores = output.iou_scores.squeeze()

    if model_name == "sam_vit_h_4b8939":
        assert scores[-1].item() == 0.579_890_251_159_668

        inputs = processor(
            images=np.array(raw_image), input_points=input_points, input_labels=input_labels, return_tensors="pt"
        ).to("cuda")

        with torch.no_grad():
            output = hf_model(**inputs)
        scores = output.iou_scores.squeeze()

        assert scores[-1].item() == 0.9_712_603_092_193_604

        input_boxes = ((75, 275, 1725, 850),)

        inputs = processor(images=np.array(raw_image), input_boxes=input_boxes, return_tensors="pt").to("cuda")

        with torch.no_grad():
            output = hf_model(**inputs)
        scores = output.iou_scores.squeeze()

        assert scores[-1].item() == 0.8_686_015_605_926_514

        # Test with 2 points and 1 image.
        input_points = [[[400, 650], [800, 650]]]
        input_labels = [[1, 1]]

        inputs = processor(
            images=np.array(raw_image), input_points=input_points, input_labels=input_labels, return_tensors="pt"
        ).to("cuda")

        with torch.no_grad():
            output = hf_model(**inputs)
        scores = output.iou_scores.squeeze()

        assert scores[-1].item() == 0.9_936_047_792_434_692
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    choices = ["sam_vit_b_01ec64", "sam_vit_h_4b8939", "sam_vit_l_0b3195"]
    parser.add_argument(
        "--model_name",
        default="sam_vit_h_4b8939",
        choices=choices,
        type=str,
        help="Name of the original SAM checkpoint to convert.",
    )
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether to push the model and processor to the hub after converting",
    )
    parser.add_argument(
        "--model_hub_id",
        default="ybelkada/segment-anything",
        type=str,
        help="Hub repo id that hosts the original `segment-anything` checkpoints.",
    )
    args = parser.parse_args()

    convert_sam_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.model_hub_id)
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import torch
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
@dataclass
class TextToVideoSDPipelineOutput(BaseOutput):
    """Output of the text-to-video pipelines: the generated video frames."""

    frames: Union[List[np.ndarray], torch.FloatTensor]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_text_to_video_synth import TextToVideoSDPipeline
    from .pipeline_text_to_video_synth_img2img import VideoToVideoSDPipeline  # noqa: F401
from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
import copy
import json
import os
import tempfile
from transformers import is_torch_available
from .test_configuration_utils import config_common_kwargs
class ConfigTester(object):
    def __init__(self, parent, config_class=None, has_text_modality=True, common_properties=None, **kwargs):
        self.parent = parent
        self.config_class = config_class
        self.has_text_modality = has_text_modality
        self.inputs_dict = kwargs
        self.common_properties = common_properties

    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        common_properties = (
            ["hidden_size", "num_attention_heads", "num_hidden_layers"]
            if self.common_properties is None
            else self.common_properties
        )
        # Add common fields for text models
        if self.has_text_modality:
            common_properties.extend(["vocab_size"])
        # Test that config has the common properties as getters
        for prop in common_properties:
            self.parent.assertTrue(hasattr(config, prop), msg=f"`{prop}` does not exist")
        # Test that config has the common properties as setter
        for idx, name in enumerate(common_properties):
            try:
                setattr(config, name, idx)
                self.parent.assertEqual(
                    getattr(config, name), idx, msg=f"`{name} value {idx} expected, but was {getattr(config, name)}"
                )
            except NotImplementedError:
                # Some models might not be able to implement setters for common_properties
                # In that case, a NotImplementedError is raised
                pass
        # Test if config class can be called with Config(prop_name=..)
        for idx, name in enumerate(common_properties):
            try:
                config = self.config_class(**{name: idx})
                self.parent.assertEqual(
                    getattr(config, name), idx, msg=f"`{name} value {idx} expected, but was {getattr(config, name)}"
                )
            except NotImplementedError:
                # Some models might not be able to implement setters for common_properties
                # In that case, a NotImplementedError is raised
                pass

    def create_and_test_config_to_json_string(self):
        config = self.config_class(**self.inputs_dict)
        obj = json.loads(config.to_json_string())
        for key, value in self.inputs_dict.items():
            self.parent.assertEqual(obj[key], value)

    def create_and_test_config_to_json_file(self):
        config_first = self.config_class(**self.inputs_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "config.json")
            config_first.to_json_file(json_file_path)
            config_second = self.config_class.from_json_file(json_file_path)
        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())

    def create_and_test_config_from_and_save_pretrained(self):
        config_first = self.config_class(**self.inputs_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            config_first.save_pretrained(tmpdirname)
            config_second = self.config_class.from_pretrained(tmpdirname)
        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())

    def create_and_test_config_from_and_save_pretrained_subfolder(self):
        config_first = self.config_class(**self.inputs_dict)
        subfolder = "test"
        with tempfile.TemporaryDirectory() as tmpdirname:
            sub_tmpdirname = os.path.join(tmpdirname, subfolder)
            config_first.save_pretrained(sub_tmpdirname)
            config_second = self.config_class.from_pretrained(tmpdirname, subfolder=subfolder)
        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())

    def create_and_test_config_with_num_labels(self):
        config = self.config_class(**self.inputs_dict, num_labels=5)
        self.parent.assertEqual(len(config.id2label), 5)
        self.parent.assertEqual(len(config.label2id), 5)
        config.num_labels = 3
        self.parent.assertEqual(len(config.id2label), 3)
        self.parent.assertEqual(len(config.label2id), 3)

    def check_config_can_be_init_without_params(self):
        if self.config_class.is_composition:
            return
        config = self.config_class()
        self.parent.assertIsNotNone(config)

    def check_config_arguments_init(self):
        kwargs = copy.deepcopy(config_common_kwargs)
        config = self.config_class(**self.inputs_dict, **kwargs)
        wrong_values = []
        for key, value in config_common_kwargs.items():
            if key == "torch_dtype":
                if not is_torch_available():
                    continue
                else:
                    import torch

                    if config.torch_dtype != torch.float16:
                        wrong_values.append(("torch_dtype", config.torch_dtype, torch.float16))
            elif getattr(config, key) != value:
                wrong_values.append((key, getattr(config, key), value))

        if len(wrong_values) > 0:
            errors = "\n".join([f"- {v[0]}: got {v[1]} instead of {v[2]}" for v in wrong_values])
            raise ValueError(f"The following keys were not properly set in the config:\n{errors}")

    def run_common_tests(self):
        self.create_and_test_config_common_properties()
        self.create_and_test_config_to_json_string()
        self.create_and_test_config_to_json_file()
        self.create_and_test_config_from_and_save_pretrained()
        self.create_and_test_config_from_and_save_pretrained_subfolder()
        self.create_and_test_config_with_num_labels()
        self.check_config_can_be_init_without_params()
        self.check_config_arguments_init()
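
# Minimal usage sketch (added; not part of the original file). A model test case
# usually wires the tester to its config class and delegates to the shared checks;
# `BertConfig` below is just an illustrative choice.
#
#     from transformers import BertConfig
#
#     class BertConfigTest(unittest.TestCase):
#         def setUp(self):
#             self.config_tester = ConfigTester(self, config_class=BertConfig, hidden_size=37)
#
#         def test_config(self):
#             self.config_tester.run_common_tests()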
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
def get_job_links(workflow_run_id, token=None):
    """Extract job names and their links in a GitHub Actions workflow run."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
    result = requests.get(url, headers=headers).json()
    job_links = {}

    try:
        job_links.update({job["name"]: job["html_url"] for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            job_links.update({job["name"]: job["html_url"] for job in result["jobs"]})

        return job_links
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")

    return {}
def get_artifacts_links(workflow_run_id, token=None):
    """Get all artifact links from a workflow run."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/artifacts?per_page=100"
    result = requests.get(url, headers=headers).json()
    artifacts = {}

    try:
        artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]})

        return artifacts
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")

    return {}
def download_artifact(artifact_name, artifact_url, output_dir, token):
    """Download a GitHub Actions artifact zip, following the redirect to the real URL."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    result = requests.get(artifact_url, headers=headers, allow_redirects=False)
    download_url = result.headers["Location"]
    response = requests.get(download_url, allow_redirects=True)
    file_path = os.path.join(output_dir, f"{artifact_name}.zip")
    with open(file_path, "wb") as fp:
        fp.write(response.content)
def get_errors_from_single_artifact(artifact_zip_path, job_links=None):
    """Extract (error line, error, failed test, job link) tuples from one artifact zip."""
    errors = []
    failed_tests = []
    job_name = None

    with zipfile.ZipFile(artifact_zip_path) as z:
        for filename in z.namelist():
            if not os.path.isdir(filename):
                # read the file
                if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
                    with z.open(filename) as f:
                        for line in f:
                            line = line.decode("UTF-8").strip()
                            if filename == "failures_line.txt":
                                try:
                                    # `error_line` is the place where `error` occurs
                                    error_line = line[: line.index(": ")]
                                    error = line[line.index(": ") + len(": ") :]
                                    errors.append([error_line, error])
                                except Exception:
                                    # skip un-related lines
                                    pass
                            elif filename == "summary_short.txt" and line.startswith("FAILED "):
                                # `test` is the test method that failed
                                test = line[len("FAILED ") :]
                                failed_tests.append(test)
                            elif filename == "job_name.txt":
                                job_name = line

    if len(errors) != len(failed_tests):
        raise ValueError(
            f"`errors` and `failed_tests` should have the same number of elements. Got {len(errors)} for `errors` "
            f"and {len(failed_tests)} for `failed_tests` instead. The test reports in {artifact_zip_path} have some"
            " problem."
        )

    job_link = None
    if job_name and job_links:
        job_link = job_links.get(job_name, None)

    # A list with elements of the form (line of error, error, failed test)
    result = [x + [y] + [job_link] for x, y in zip(errors, failed_tests)]

    return result
def get_all_errors(artifact_dir, job_links=None):
    """Extract errors from all artifact zip files found in `artifact_dir`."""
    errors = []
    paths = [os.path.join(artifact_dir, p) for p in os.listdir(artifact_dir) if p.endswith(".zip")]
    for p in paths:
        errors.extend(get_errors_from_single_artifact(p, job_links=job_links))
    return errors
def reduce_by_error(logs, error_filter=None):
    """Count each error and collect the tests that failed with it, sorted by frequency."""
    counter = Counter()
    counter.update([x[1] for x in logs])
    counts = counter.most_common()
    r = {}
    for error, count in counts:
        if error_filter is None or error not in error_filter:
            r[error] = {"count": count, "failed_tests": [(x[2], x[0]) for x in logs if x[1] == error]}

    r = dict(sorted(r.items(), key=lambda item: item[1]["count"], reverse=True))
    return r
def get_model(test):
    """Get the model name from a test method path like `tests/models/<model>/...`."""
    test = test.split("::")[0]
    if test.startswith("tests/models/"):
        return test.split("/")[2]
    return None
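
# Quick sanity check (added; the paths are illustrative only):
#   get_model("tests/models/albert/test_modeling_albert.py::AlbertModelTest::test_forward")  -> "albert"
#   get_model("tests/test_tokenization_common.py::CommonTests::test_pad")                    -> None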
def reduce_by_model(logs, error_filter=None):
    """Group errors per model and sort models by their total error count."""
    logs = [(x[0], x[1], get_model(x[2])) for x in logs]
    logs = [x for x in logs if x[2] is not None]
    tests = {x[2] for x in logs}

    r = {}
    for test in tests:
        counter = Counter()
        # count by errors in `test`
        counter.update([x[1] for x in logs if x[2] == test])
        counts = counter.most_common()
        error_counts = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
        n_errors = sum(error_counts.values())
        if n_errors > 0:
            r[test] = {"count": n_errors, "errors": error_counts}

    r = dict(sorted(r.items(), key=lambda item: item[1]["count"], reverse=True))
    return r
def make_github_table(reduced_by_error):
    """Render the per-error counts as a GitHub-flavored Markdown table."""
    header = "| no. | error | status |"
    sep = "|-:|:-|:-|"
    lines = [header, sep]
    for error in reduced_by_error:
        count = reduced_by_error[error]["count"]
        line = f"| {count} | {error[:100]} | |"
        lines.append(line)
    return "\n".join(lines)
def make_github_table_per_model(reduced_by_model):
    """Render per-model error statistics as a GitHub-flavored Markdown table."""
    header = "| model | no. of errors | major error | count |"
    sep = "|-:|-:|-:|-:|"
    lines = [header, sep]
    for model in reduced_by_model:
        count = reduced_by_model[model]["count"]
        error, _count = list(reduced_by_model[model]["errors"].items())[0]
        line = f"| {model} | {count} | {error[:60]} | {_count} |"
        lines.append(line)
    return "\n".join(lines)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
    parser.add_argument(
        "--output_dir",
        type=str,
        required=True,
        help="Where to store the downloaded artifacts and other result files.",
    )
    parser.add_argument("--token", default=None, type=str, help="A token that has actions:read permission.")
    args = parser.parse_args()

    os.makedirs(args.output_dir, exist_ok=True)

    _job_links = get_job_links(args.workflow_run_id, token=args.token)
    job_links = {}
    # To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
    # For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
    if _job_links:
        for k, v in _job_links.items():
            # This is how GitHub actions combine job names.
            if " / " in k:
                index = k.find(" / ")
                k = k[index + len(" / ") :]
            job_links[k] = v
    with open(os.path.join(args.output_dir, "job_links.json"), "w", encoding="UTF-8") as fp:
        json.dump(job_links, fp, ensure_ascii=False, indent=4)

    artifacts = get_artifacts_links(args.workflow_run_id, token=args.token)
    with open(os.path.join(args.output_dir, "artifacts.json"), "w", encoding="UTF-8") as fp:
        json.dump(artifacts, fp, ensure_ascii=False, indent=4)

    for idx, (name, url) in enumerate(artifacts.items()):
        download_artifact(name, url, args.output_dir, args.token)
        # Be gentle to GitHub
        time.sleep(1)

    errors = get_all_errors(args.output_dir, job_links=job_links)

    # `e[1]` is the error
    counter = Counter()
    counter.update([e[1] for e in errors])

    # print the top 30 most common test errors
    most_common = counter.most_common(30)
    for item in most_common:
        print(item)

    with open(os.path.join(args.output_dir, "errors.json"), "w", encoding="UTF-8") as fp:
        json.dump(errors, fp, ensure_ascii=False, indent=4)

    reduced_by_error = reduce_by_error(errors)
    reduced_by_model = reduce_by_model(errors)

    s1 = make_github_table(reduced_by_error)
    s2 = make_github_table_per_model(reduced_by_model)

    with open(os.path.join(args.output_dir, "reduced_by_error.txt"), "w", encoding="UTF-8") as fp:
        fp.write(s1)
    with open(os.path.join(args.output_dir, "reduced_by_model.txt"), "w", encoding="UTF-8") as fp:
        fp.write(s2)
"""simple docstring"""
import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "mgp-str": "https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"mgp-str": 27}


class MgpstrTokenizer(PreTrainedTokenizer):
    """Character-level tokenizer for MGP-STR, backed by a simple JSON vocabulary."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(self, vocab_file, unk_token="[GO]", bos_token="[GO]", eos_token="[s]", pad_token="[GO]", **kwargs):
        super().__init__(
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            **kwargs,
        )
        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.vocab = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.vocab.items()}

    @property
    def vocab_size(self):
        return len(self.vocab)

    def get_vocab(self):
        return dict(self.vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        char_tokens = []
        for s in text:
            char_tokens.extend(s)
        return char_tokens

    def _convert_token_to_id(self, token):
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.vocab, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
        return (vocab_file,)
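
# Minimal usage sketch (added; illustrative, not part of the original module):
#
#     tokenizer = MgpstrTokenizer.from_pretrained("alibaba-damo/mgp-str-base")
#     tokenizer._tokenize("abc")                    # -> ["a", "b", "c"]  (character-level)
#     tokenizer.convert_tokens_to_ids(["a", "b"])   # ids looked up in vocab.json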
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..bit import BitConfig
logger = logging.get_logger(__name__)

DPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Intel/dpt-large": "https://huggingface.co/Intel/dpt-large/resolve/main/config.json",
    # See all DPT models at https://huggingface.co/models?filter=dpt
}


class DPTConfig(PretrainedConfig):
    model_type = "dpt"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=384,
        patch_size=16,
        num_channels=3,
        is_hybrid=False,
        qkv_bias=True,
        backbone_out_indices=[2, 5, 8, 11],
        readout_type="project",
        reassemble_factors=[4, 2, 1, 0.5],
        neck_hidden_sizes=[96, 192, 384, 768],
        fusion_hidden_size=256,
        head_in_index=-1,
        use_batch_norm_in_fusion_residual=False,
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        semantic_loss_ignore_index=255,
        semantic_classifier_dropout=0.1,
        backbone_featmap_shape=[1, 1024, 24, 24],
        neck_ignore_stages=[0, 1],
        backbone_config=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.is_hybrid = is_hybrid

        if self.is_hybrid:
            if backbone_config is None:
                logger.info("Initializing the config with a `BiT` backbone.")
                backbone_config = {
                    "global_padding": "same",
                    "layer_type": "bottleneck",
                    "depths": [3, 4, 9],
                    "out_features": ["stage1", "stage2", "stage3"],
                    "embedding_dynamic_padding": True,
                }
                self.backbone_config = BitConfig(**backbone_config)
            elif isinstance(backbone_config, dict):
                logger.info("Initializing the config with a `BiT` backbone.")
                self.backbone_config = BitConfig(**backbone_config)
            elif isinstance(backbone_config, PretrainedConfig):
                self.backbone_config = backbone_config
            else:
                raise ValueError(
                    f"backbone_config must be a dictionary or a `PretrainedConfig`, got {backbone_config.__class__}."
                )
            self.backbone_featmap_shape = backbone_featmap_shape
            self.neck_ignore_stages = neck_ignore_stages

            if readout_type != "project":
                raise ValueError("Readout type must be 'project' when using `DPT-hybrid` mode.")
        else:
            self.backbone_config = None
            self.backbone_featmap_shape = None
            self.neck_ignore_stages = []

        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.backbone_out_indices = backbone_out_indices
        if readout_type not in ["ignore", "add", "project"]:
            raise ValueError("Readout_type must be one of ['ignore', 'add', 'project']")
        self.readout_type = readout_type
        self.reassemble_factors = reassemble_factors
        self.neck_hidden_sizes = neck_hidden_sizes
        self.fusion_hidden_size = fusion_hidden_size
        self.head_in_index = head_in_index
        self.use_batch_norm_in_fusion_residual = use_batch_norm_in_fusion_residual
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
        self.semantic_classifier_dropout = semantic_classifier_dropout

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        if output["backbone_config"] is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
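
# Illustrative sketch (added): the two supported configurations. The assertion on
# `model_type` assumes `BitConfig` serializes it as "bit".
#
#     config = DPTConfig()                # plain DPT with a ViT backbone
#     hybrid = DPTConfig(is_hybrid=True)  # DPT-hybrid with the default BiT backbone
#     assert hybrid.to_dict()["backbone_config"]["model_type"] == "bit"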
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImg2ImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class ShapEImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = ShapEImg2ImgPipeline
    params = ["image"]
    batch_params = ["image"]
    required_optional_params = [
        "num_images_per_prompt",
        "num_inference_steps",
        "generator",
        "latents",
        "guidance_scale",
        "frame_size",
        "output_type",
        "return_dict",
    ]
    test_gpu_offload = False
@property
def __a ( self : Optional[int] ) -> Dict:
"""simple docstring"""
return 32
@property
def __a ( self : int ) -> int:
"""simple docstring"""
return 32
@property
def __a ( self : int ) -> Optional[Any]:
"""simple docstring"""
return self.time_input_dim * 4
@property
def __a ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
return 8
@property
def __a ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
torch.manual_seed(0 )
lowercase : Optional[int] = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , image_size=64 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=1 , )
lowercase : Optional[int] = CLIPVisionModel(a__ )
return model
@property
def __a ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
lowercase : Dict = CLIPImageProcessor(
crop_size=224 , do_center_crop=a__ , do_normalize=a__ , do_resize=a__ , image_mean=[0.48_145_466, 0.4_578_275, 0.40_821_073] , image_std=[0.26_862_954, 0.26_130_258, 0.27_577_711] , resample=3 , size=224 , )
return image_processor
@property
def __a ( self : List[Any] ) -> str:
"""simple docstring"""
torch.manual_seed(0 )
lowercase : Dict = {
'''num_attention_heads''': 2,
'''attention_head_dim''': 16,
'''embedding_dim''': self.time_input_dim,
'''num_embeddings''': 32,
'''embedding_proj_dim''': self.text_embedder_hidden_size,
'''time_embed_dim''': self.time_embed_dim,
'''num_layers''': 1,
'''clip_embed_dim''': self.time_input_dim * 2,
'''additional_embeddings''': 0,
'''time_embed_act_fn''': '''gelu''',
'''norm_in_type''': '''layer''',
'''embedding_proj_norm_type''': '''layer''',
'''encoder_hid_proj_type''': None,
'''added_emb_type''': None,
}
lowercase : Optional[Any] = PriorTransformer(**a__ )
return model
@property
def __a ( self : List[str] ) -> str:
"""simple docstring"""
torch.manual_seed(0 )
lowercase : Optional[Any] = {
'''param_shapes''': (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
'''d_latent''': self.time_input_dim,
'''d_hidden''': self.renderer_dim,
'''n_output''': 12,
'''background''': (
0.1,
0.1,
0.1,
),
}
lowercase : Dict = ShapERenderer(**a__ )
return model
def __a ( self : Optional[Any] ) -> List[str]:
"""simple docstring"""
lowercase : Any = self.dummy_prior
lowercase : Optional[Any] = self.dummy_image_encoder
lowercase : List[Any] = self.dummy_image_processor
lowercase : str = self.dummy_renderer
lowercase : Optional[Any] = HeunDiscreteScheduler(
beta_schedule='''exp''' , num_train_timesteps=1_024 , prediction_type='''sample''' , use_karras_sigmas=a__ , clip_sample=a__ , clip_sample_range=1.0 , )
lowercase : Optional[int] = {
'''prior''': prior,
'''image_encoder''': image_encoder,
'''image_processor''': image_processor,
'''renderer''': renderer,
'''scheduler''': scheduler,
}
return components
def __a ( self : Optional[Any] , _A : List[Any] , _A : Union[str, Any]=0 ) -> List[str]:
"""simple docstring"""
lowercase : List[Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(a__ ) ).to(a__ )
if str(a__ ).startswith('''mps''' ):
lowercase : List[str] = torch.manual_seed(a__ )
else:
lowercase : List[Any] = torch.Generator(device=a__ ).manual_seed(a__ )
lowercase : int = {
'''image''': input_image,
'''generator''': generator,
'''num_inference_steps''': 1,
'''frame_size''': 32,
'''output_type''': '''np''',
}
return inputs
def __a ( self : Dict ) -> str:
"""simple docstring"""
lowercase : Optional[Any] = '''cpu'''
lowercase : Union[str, Any] = self.get_dummy_components()
lowercase : Optional[int] = self.pipeline_class(**a__ )
lowercase : Union[str, Any] = pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
lowercase : str = pipe(**self.get_dummy_inputs(a__ ) )
lowercase : List[str] = output.images[0]
lowercase : Dict = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
lowercase : Optional[Any] = np.array(
[
0.00_039_216,
0.00_039_216,
0.00_039_216,
0.00_039_216,
0.00_039_216,
0.00_039_216,
0.00_039_216,
0.00_039_216,
0.00_039_216,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def __a ( self : Optional[int] ) -> int:
"""simple docstring"""
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def __a ( self : Any ) -> Optional[Any]:
"""simple docstring"""
lowercase : Optional[int] = torch_device == '''cpu'''
lowercase : List[Any] = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=a__ , relax_max_difference=a__ , )
def __a ( self : List[str] ) -> List[Any]:
"""simple docstring"""
lowercase : Dict = self.get_dummy_components()
lowercase : Dict = self.pipeline_class(**a__ )
lowercase : str = pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
lowercase : Union[str, Any] = 1
lowercase : Optional[Any] = 2
lowercase : Optional[int] = self.get_dummy_inputs(a__ )
for key in inputs.keys():
if key in self.batch_params:
lowercase : int = batch_size * [inputs[key]]
lowercase : int = pipe(**a__ , num_images_per_prompt=a__ )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class ShapEImg2ImgPipelineIntegrationTests(unittest.TestCase):
def __a ( self : Tuple ) -> List[str]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __a ( self : Dict ) -> Dict:
"""simple docstring"""
lowercase : Any = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/shap_e/corgi.png''' )
lowercase : Optional[Any] = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/shap_e/test_shap_e_img2img_out.npy''' )
        pipe = ShapEImg2ImgPipeline.from_pretrained("openai/shap-e-img2img")
lowercase : Optional[Any] = pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
lowercase : List[str] = torch.Generator(device=a__ ).manual_seed(0 )
lowercase : Any = pipe(
a__ , generator=a__ , guidance_scale=3.0 , num_inference_steps=64 , frame_size=64 , output_type='''np''' , ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(a__ , a__ )
from sklearn.metrics import mean_squared_error
import datasets
_CITATION = """\
@article{scikit-learn,
  title={Scikit-learn: Machine Learning in {P}ython},
  author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
         and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
         and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
         Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
  journal={Journal of Machine Learning Research},
  volume={12},
  pages={2825--2830},
  year={2011}
}
"""

_DESCRIPTION = """\
Mean Squared Error(MSE) is the average of the square of difference between the predicted
and actual values.
"""

_KWARGS_DESCRIPTION = """
Args:
    predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)
        Estimated target values.
    references: array-like of shape (n_samples,) or (n_samples, n_outputs)
        Ground truth (correct) target values.
    sample_weight: array-like of shape (n_samples,), default=None
        Sample weights.
    multioutput: {"raw_values", "uniform_average"} or array-like of shape (n_outputs,), default="uniform_average"
        Defines aggregating of multiple output values. Array-like value defines weights used to average errors.

        "raw_values" : Returns a full set of errors in case of multioutput input.

        "uniform_average" : Errors of all outputs are averaged with uniform weight.

    squared : bool, default=True
        If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.

Returns:
    mse : mean squared error.
Examples:

    >>> mse_metric = datasets.load_metric("mse")
    >>> predictions = [2.5, 0.0, 2, 8]
    >>> references = [3, -0.5, 2, 7]
    >>> results = mse_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'mse': 0.375}
    >>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)
    >>> print(rmse_result)
    {'mse': 0.6123724356957945}

    If you're using multi-dimensional lists, then set the config as follows :

    >>> mse_metric = datasets.load_metric("mse", "multilist")
    >>> predictions = [[0.5, 1], [-1, 1], [7, -6]]
    >>> references = [[0, 2], [-1, 2], [8, -5]]
    >>> results = mse_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'mse': 0.7083333333333334}
    >>> results = mse_metric.compute(predictions=predictions, references=references, multioutput='raw_values')
    >>> print(results) # doctest: +NORMALIZE_WHITESPACE
    {'mse': array([0.41666667, 1. ])}
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Mse(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(self._get_feature_types()),
            reference_urls=[
                "https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html"
            ],
        )

    def _get_feature_types(self):
        if self.config_name == "multilist":
            return {
                "predictions": datasets.Sequence(datasets.Value("float")),
                "references": datasets.Sequence(datasets.Value("float")),
            }
        else:
            return {
                "predictions": datasets.Value("float"),
                "references": datasets.Value("float"),
            }

    def _compute(self, predictions, references, sample_weight=None, multioutput="uniform_average", squared=True):
        mse = mean_squared_error(
            references, predictions, sample_weight=sample_weight, multioutput=multioutput, squared=squared
        )
        return {"mse": mse}
"""simple docstring"""
from __future__ import annotations
from collections import deque
class Automaton:
    """Aho-Corasick automaton for multi-pattern string matching."""

    def __init__(self, keywords: list[str]):
        self.adlist: list[dict] = []
        self.adlist.append(
            {"value": "", "next_states": [], "fail_state": 0, "output": []}
        )

        for keyword in keywords:
            self.add_keyword(keyword)
        self.set_fail_transitions()

    def find_next_state(self, current_state: int, char: str):
        for state in self.adlist[current_state]["next_states"]:
            if char == self.adlist[state]["value"]:
                return state
        return None

    def add_keyword(self, keyword: str) -> None:
        current_state = 0
        for character in keyword:
            next_state = self.find_next_state(current_state, character)
            if next_state is None:
                self.adlist.append(
                    {
                        "value": character,
                        "next_states": [],
                        "fail_state": 0,
                        "output": [],
                    }
                )
                self.adlist[current_state]["next_states"].append(len(self.adlist) - 1)
                current_state = len(self.adlist) - 1
            else:
                current_state = next_state
        self.adlist[current_state]["output"].append(keyword)

    def set_fail_transitions(self) -> None:
        q: deque = deque()
        for node in self.adlist[0]["next_states"]:
            q.append(node)
            self.adlist[node]["fail_state"] = 0
        while q:
            r = q.popleft()
            for child in self.adlist[r]["next_states"]:
                q.append(child)
                state = self.adlist[r]["fail_state"]
                while (
                    self.find_next_state(state, self.adlist[child]["value"]) is None
                    and state != 0
                ):
                    state = self.adlist[state]["fail_state"]
                self.adlist[child]["fail_state"] = self.find_next_state(
                    state, self.adlist[child]["value"]
                )
                if self.adlist[child]["fail_state"] is None:
                    self.adlist[child]["fail_state"] = 0
                self.adlist[child]["output"] = (
                    self.adlist[child]["output"]
                    + self.adlist[self.adlist[child]["fail_state"]]["output"]
                )

    def search_in(self, string: str) -> dict:
        result: dict = {}  # returns a dict with keywords and list of its occurrences
        current_state = 0
        for i in range(len(string)):
            while (
                self.find_next_state(current_state, string[i]) is None
                and current_state != 0
            ):
                current_state = self.adlist[current_state]["fail_state"]
            next_state = self.find_next_state(current_state, string[i])
            if next_state is None:
                current_state = 0
            else:
                current_state = next_state
            for key in self.adlist[current_state]["output"]:
                if key not in result:
                    result[key] = []
                result[key].append(i - len(key) + 1)
        return result
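
# Illustrative sketch (added; not part of the original module): the automaton is
# built once and then scans a haystack in a single linear pass.
def _demo_automaton():
    automaton = Automaton(["he", "she", "his", "hers"])
    return automaton.search_in("ahishers")
    # -> {'his': [1], 'she': [3], 'he': [4], 'hers': [4]}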
if __name__ == "__main__":
import doctest
doctest.testmod()
'''simple docstring'''
def climb_stairs(number_of_steps: int) -> int:
    """Count the distinct ways to climb `number_of_steps` stairs taking 1 or 2 steps at a time."""
    assert (
        isinstance(number_of_steps, int) and number_of_steps > 0
    ), f"number_of_steps needs to be positive integer, your input {number_of_steps}"
    if number_of_steps == 1:
        return 1
    previous, current = 1, 1
    for _ in range(number_of_steps - 1):
        current, previous = current + previous, current
    return current
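
# Worked trace (added for clarity): climb_stairs(4) == 5. The pair
# (previous, current) evolves 1,1 -> 1,2 -> 2,3 -> 3,5, i.e. the Fibonacci
# recurrence f(n) = f(n-1) + f(n-2) with f(1) = 1 and f(2) = 2.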
if __name__ == "__main__":
import doctest
doctest.testmod()
import fire
from utils import calculate_rouge, save_json
def calculate_rouge_path(pred_path, tgt_path, save_path=None, **kwargs):
    """Kwargs will be passed to calculate_rouge."""
    pred_lns = [x.strip() for x in open(pred_path).readlines()]
    tgt_lns = [x.strip() for x in open(tgt_path).readlines()][: len(pred_lns)]
    metrics = calculate_rouge(pred_lns, tgt_lns, **kwargs)
    if save_path is not None:
        save_json(metrics, save_path, indent=None)
    return metrics  # these print nicely
if __name__ == "__main__":
fire.Fire(calculate_rouge_path)
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, MBartConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import TFAutoModelForSeq2SeqLM, TFMBartForConditionalGeneration, TFMBartModel
@require_tf
class TFMBartModelTester:
    config_cls = MBartConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        inputs_dict = prepare_mbart_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFMBartModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()
        past_key_values = past_key_values[1]
def prepare_mbart_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
@require_tf
class TFMBartModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else ()
    all_generative_model_classes = (TFMBartForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFMBartForConditionalGeneration,
            "feature-extraction": TFMBartModel,
            "summarization": TFMBartForConditionalGeneration,
            "text2text-generation": TFMBartForConditionalGeneration,
            "translation": TFMBartForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name != "FeatureExtractionPipelineTests":
            # Exception encountered when calling layer '...'
            return True

        return False

    def setUp(self):
        self.model_tester = TFMBartModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MBartConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_sentencepiece
@require_tokenizers
@require_tf
class TFMBartEnRoIntegrationTest(unittest.TestCase):
    src_text = [
        " UN Chief Says There Is No Military Solution in Syria",
    ]
    expected_text = [
        "Şeful ONU declară că nu există o soluţie militară în Siria",
    ]
    model_name = "facebook/mbart-large-en-ro"

    @cached_property
    def tokenizer(self):
        return AutoTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model

    def _assert_generated_batch_equal_expected(self, **tokenizer_kwargs):
        generated_words = self.translate_src_text(**tokenizer_kwargs)
        self.assertListEqual(self.expected_text, generated_words)

    def translate_src_text(self, **tokenizer_kwargs):
        model_inputs = self.tokenizer(self.src_text, **tokenizer_kwargs, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids, attention_mask=model_inputs.attention_mask, num_beams=2
        )
        generated_words = self.tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
        return generated_words

    @slow
    def test_batch_generation_en_ro(self):
        self._assert_generated_batch_equal_expected()
import torch
from diffusers import DDPMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class snake_case__ (_UpperCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = (DDPMParallelScheduler,)
def __UpperCAmelCase ( self : str , **__lowerCamelCase : Dict ) -> List[str]:
a = {
"num_train_timesteps": 10_00,
"beta_start": 0.0_001,
"beta_end": 0.02,
"beta_schedule": "linear",
"variance_type": "fixed_small",
"clip_sample": True,
}
config.update(**__lowerCamelCase )
return config
def __UpperCAmelCase ( self : Tuple ) -> Any:
for timesteps in [1, 5, 1_00, 10_00]:
self.check_over_configs(num_train_timesteps=__lowerCamelCase )
def __UpperCAmelCase ( self : List[Any] ) -> Dict:
for beta_start, beta_end in zip([0.0_001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=__lowerCamelCase , beta_end=__lowerCamelCase )
def __UpperCAmelCase ( self : int ) -> Any:
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=__lowerCamelCase )
def __UpperCAmelCase ( self : Dict ) -> int:
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=__lowerCamelCase )
def __UpperCAmelCase ( self : Tuple ) -> Optional[Any]:
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=__lowerCamelCase )
def __UpperCAmelCase ( self : str ) -> List[str]:
self.check_over_configs(thresholding=__lowerCamelCase )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=__lowerCamelCase , prediction_type=__lowerCamelCase , sample_max_value=__lowerCamelCase , )
def __UpperCAmelCase ( self : Any ) -> List[str]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=__lowerCamelCase )
def __UpperCAmelCase ( self : Optional[Any] ) -> int:
for t in [0, 5_00, 9_99]:
self.check_over_forward(time_step=__lowerCamelCase )
def __UpperCAmelCase ( self : Union[str, Any] ) -> List[Any]:
a = self.scheduler_classes[0]
a = self.get_scheduler_config()
a = scheduler_class(**__lowerCamelCase )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(4_87 ) - 0.00_979 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(9_99 ) - 0.02 ) ) < 1e-5
def __UpperCAmelCase ( self : List[Any] ) -> Dict:
a = self.scheduler_classes[0]
a = self.get_scheduler_config()
a = scheduler_class(**__lowerCamelCase )
a = len(__lowerCamelCase )
a = self.dummy_model()
a = self.dummy_sample_deter
a = self.dummy_sample_deter + 0.1
a = self.dummy_sample_deter - 0.1
a = samplea.shape[0]
a = torch.stack([samplea, samplea, samplea] , dim=0 )
a = torch.arange(__lowerCamelCase )[0:3, None].repeat(1 , __lowerCamelCase )
a = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) )
a = scheduler.batch_step_no_noise(__lowerCamelCase , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) )
a = torch.sum(torch.abs(__lowerCamelCase ) )
a = torch.mean(torch.abs(__lowerCamelCase ) )
assert abs(result_sum.item() - 1_153.1_833 ) < 1e-2
assert abs(result_mean.item() - 0.5_005 ) < 1e-3
def __UpperCAmelCase ( self : Dict ) -> List[str]:
a = self.scheduler_classes[0]
a = self.get_scheduler_config()
a = scheduler_class(**__lowerCamelCase )
a = len(__lowerCamelCase )
a = self.dummy_model()
a = self.dummy_sample_deter
a = torch.manual_seed(0 )
for t in reversed(range(__lowerCamelCase ) ):
# 1. predict noise residual
a = model(__lowerCamelCase , __lowerCamelCase )
# 2. predict previous mean of sample x_t-1
a = scheduler.step(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , generator=__lowerCamelCase ).prev_sample
a = pred_prev_sample
a = torch.sum(torch.abs(__lowerCamelCase ) )
a = torch.mean(torch.abs(__lowerCamelCase ) )
assert abs(result_sum.item() - 258.9_606 ) < 1e-2
assert abs(result_mean.item() - 0.3_372 ) < 1e-3
def __UpperCAmelCase ( self : List[Any] ) -> List[str]:
a = self.scheduler_classes[0]
a = self.get_scheduler_config(prediction_type="v_prediction" )
a = scheduler_class(**__lowerCamelCase )
a = len(__lowerCamelCase )
a = self.dummy_model()
a = self.dummy_sample_deter
a = torch.manual_seed(0 )
for t in reversed(range(__lowerCamelCase ) ):
# 1. predict noise residual
a = model(__lowerCamelCase , __lowerCamelCase )
# 2. predict previous mean of sample x_t-1
a = scheduler.step(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , generator=__lowerCamelCase ).prev_sample
a = pred_prev_sample
a = torch.sum(torch.abs(__lowerCamelCase ) )
a = torch.mean(torch.abs(__lowerCamelCase ) )
assert abs(result_sum.item() - 202.0_296 ) < 1e-2
assert abs(result_mean.item() - 0.2_631 ) < 1e-3
def __UpperCAmelCase ( self : Tuple ) -> List[str]:
a = self.scheduler_classes[0]
a = self.get_scheduler_config()
a = scheduler_class(**__lowerCamelCase )
a = [1_00, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=__lowerCamelCase )
a = scheduler.timesteps
for i, timestep in enumerate(__lowerCamelCase ):
if i == len(__lowerCamelCase ) - 1:
a = -1
else:
a = timesteps[i + 1]
a = scheduler.previous_timestep(__lowerCamelCase )
a = prev_t.item()
self.assertEqual(__lowerCamelCase , __lowerCamelCase )
def __UpperCAmelCase ( self : List[Any] ) -> List[Any]:
a = self.scheduler_classes[0]
a = self.get_scheduler_config()
a = scheduler_class(**__lowerCamelCase )
a = [1_00, 87, 50, 51, 0]
with self.assertRaises(__lowerCamelCase , msg="`custom_timesteps` must be in descending order." ):
scheduler.set_timesteps(timesteps=__lowerCamelCase )
def __UpperCAmelCase ( self : Union[str, Any] ) -> Any:
a = self.scheduler_classes[0]
a = self.get_scheduler_config()
a = scheduler_class(**__lowerCamelCase )
a = [1_00, 87, 50, 1, 0]
a = len(__lowerCamelCase )
with self.assertRaises(__lowerCamelCase , msg="Can only pass one of `num_inference_steps` or `custom_timesteps`." ):
scheduler.set_timesteps(num_inference_steps=__lowerCamelCase , timesteps=__lowerCamelCase )
def __UpperCAmelCase ( self : Optional[int] ) -> int:
a = self.scheduler_classes[0]
a = self.get_scheduler_config()
a = scheduler_class(**__lowerCamelCase )
a = [scheduler.config.num_train_timesteps]
with self.assertRaises(
__lowerCamelCase , msg="`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}" , ):
scheduler.set_timesteps(timesteps=__lowerCamelCase )
'''simple docstring'''
import string
from math import log10
def term_frequency(term: str, document: str) -> int:
    """Return the number of times `term` appears in `document`."""
    document_without_punctuation = document.translate(
        str.maketrans("", "", string.punctuation)
    ).replace("\n", "")
    tokenize_document = document_without_punctuation.split(" ")  # word tokenization
    return len([word for word in tokenize_document if word.lower() == term.lower()])


def document_frequency(term: str, corpus: str):
    """Return (number of documents containing `term`, total number of documents)."""
    corpus_without_punctuation = corpus.lower().translate(
        str.maketrans("", "", string.punctuation)
    )  # strip all punctuation and replace it with ''
    docs = corpus_without_punctuation.split("\n")
    term = term.lower()
    return (len([doc for doc in docs if term in doc]), len(docs))


def inverse_document_frequency(df: int, n: int, smoothing=False) -> float:
    """Return log10(n / df); with smoothing, 1 + log10(n / (1 + df))."""
    if smoothing:
        if n == 0:
            raise ValueError("log10(0) is undefined.")
        return round(1 + log10(n / (1 + df)), 3)
    if df == 0:
        raise ZeroDivisionError("df must be > 0")
    elif n == 0:
        raise ValueError("log10(0) is undefined.")
    return round(log10(n / df), 3)


def tf_idf(tf: int, idf: float) -> float:
    """Combine term frequency and inverse document frequency."""
    return round(tf * idf, 3)
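
# Illustrative walk-through (added; toy corpus, not from the original module):
def _demo_tf_idf():
    corpus = "this is the first document\nthis document is the second document\nand this is the third one"
    tf = term_frequency("document", "this document is the second document")  # -> 2
    df, n = document_frequency("document", corpus)  # -> (2, 3)
    idf = inverse_document_frequency(df, n)  # -> round(log10(3 / 2), 3) = 0.176
    return tf_idf(tf, idf)  # -> 0.352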
from __future__ import annotations
from random import choice
def random_pivot(lst):
    """Choose a random pivot element from the list."""
    return choice(lst)


def kth_number(lst: list[int], k: int) -> int:
    """Return the k-th smallest element of `lst` (1-indexed) via quickselect."""
    pivot = random_pivot(lst)

    # partition based on pivot
    # linear time
    small = [e for e in lst if e < pivot]
    big = [e for e in lst if e > pivot]

    # if we get lucky, pivot might be the element we want.
    # we can easily see this:
    # small (elements smaller than k)
    # + pivot (kth element)
    # + big (elements larger than k)
    if len(small) == k - 1:
        return pivot
    # pivot is in elements bigger than k
    elif len(small) < k - 1:
        return kth_number(big, k - len(small) - 1)
    # pivot is in elements smaller than k
    else:
        return kth_number(small, k)
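
# Illustrative check (added): kth_number([2, 1, 3, 4, 5], k=3) returns 3, the third
# smallest element. Note the strict-inequality partitions assume distinct values;
# duplicates equal to the pivot are silently dropped.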
if __name__ == "__main__":
import doctest
doctest.testmod()
import unittest
from transformers import RoFormerTokenizer, RoFormerTokenizerFast
from transformers.testing_utils import require_rjieba, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_rjieba
@require_tokenizers
class RoFormerTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = RoFormerTokenizer
    rust_tokenizer_class = RoFormerTokenizerFast
    space_between_special_tokens = True
    test_rust_tokenizer = True

    def setUp(self):
        super().setUp()

    def get_tokenizer(self, **kwargs):
        return self.tokenizer_class.from_pretrained("junnyu/roformer_chinese_base", **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return self.rust_tokenizer_class.from_pretrained("junnyu/roformer_chinese_base", **kwargs)

    def get_chinese_input_output_texts(self):
        input_text = "永和服装饰品有限公司,今天天气非常好"
        output_text = "永和 服装 饰品 有限公司 , 今 天 天 气 非常 好"
        return input_text, output_text

    def test_tokenizer(self):
        tokenizer = self.get_tokenizer()
        input_text, output_text = self.get_chinese_input_output_texts()
        tokens = tokenizer.tokenize(input_text)
        self.assertListEqual(tokens, output_text.split())
        input_tokens = tokens + [tokenizer.unk_token]
        exp_tokens = [22943, 21332, 34431, 45904, 117, 306, 1231, 1231, 2653, 33994, 1266, 100]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), exp_tokens)

    def test_rust_tokenizer(self):
        tokenizer = self.get_rust_tokenizer()
        input_text, output_text = self.get_chinese_input_output_texts()
        tokens = tokenizer.tokenize(input_text)
        self.assertListEqual(tokens, output_text.split())
        input_tokens = tokens + [tokenizer.unk_token]
        exp_tokens = [22943, 21332, 34431, 45904, 117, 306, 1231, 1231, 2653, 33994, 1266, 100]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), exp_tokens)

    # training a new tokenizer is not supported for this (rjieba-based) tokenizer,
    # so the corresponding common tests are skipped
    def test_training_new_tokenizer(self):
        pass

    def test_training_new_tokenizer_with_special_tokens_change(self):
        pass

    def test_save_slow_from_fast_and_reload_fast(self):
        pass
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase__: Any = logging.get_logger(__name__)
UpperCamelCase__: str = {}
class SCREAMING_SNAKE_CASE( A__ ):
"""simple docstring"""
lowerCamelCase__ = """llama"""
lowerCamelCase__ = ["""past_key_values"""]
def __init__( self : str , __snake_case : Any=32000 , __snake_case : int=4096 , __snake_case : Optional[Any]=11008 , __snake_case : int=32 , __snake_case : Any=32 , __snake_case : Any=None , __snake_case : Union[str, Any]="silu" , __snake_case : List[str]=2048 , __snake_case : Optional[int]=0.02 , __snake_case : Any=1E-6 , __snake_case : Tuple=True , __snake_case : int=0 , __snake_case : int=1 , __snake_case : Any=2 , __snake_case : Dict=1 , __snake_case : Any=False , __snake_case : Any=None , **__snake_case : List[Any] , ) -> Optional[int]:
UpperCAmelCase : Optional[Any] = vocab_size
UpperCAmelCase : Tuple = max_position_embeddings
UpperCAmelCase : Optional[int] = hidden_size
UpperCAmelCase : Any = intermediate_size
UpperCAmelCase : Union[str, Any] = num_hidden_layers
UpperCAmelCase : Union[str, Any] = num_attention_heads
# for backward compatibility
if num_key_value_heads is None:
UpperCAmelCase : str = num_attention_heads
UpperCAmelCase : Any = num_key_value_heads
UpperCAmelCase : Optional[Any] = hidden_act
UpperCAmelCase : List[str] = initializer_range
UpperCAmelCase : List[str] = rms_norm_eps
UpperCAmelCase : List[str] = pretraining_tp
UpperCAmelCase : Optional[int] = use_cache
UpperCAmelCase : Dict = rope_scaling
self._rope_scaling_validation()
super().__init__(
pad_token_id=__snake_case , bos_token_id=__snake_case , eos_token_id=__snake_case , tie_word_embeddings=__snake_case , **__snake_case , )
def A ( self : int ) -> Tuple:
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling , __snake_case ) or len(self.rope_scaling ) != 2:
raise ValueError(
'''`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, '''
F"""got {self.rope_scaling}""" )
UpperCAmelCase : Optional[int] = self.rope_scaling.get('''type''' , __snake_case )
UpperCAmelCase : Dict = self.rope_scaling.get('''factor''' , __snake_case )
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
F"""`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}""" )
if rope_scaling_factor is None or not isinstance(__snake_case , __snake_case ) or rope_scaling_factor <= 1.0:
raise ValueError(F"""`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}""" )
'''simple docstring'''
import argparse
import os
import re
import packaging.version
UpperCamelCase__: Union[str, Any] = "examples/"
UpperCamelCase__: Optional[Any] = {
"examples": (re.compile(r"^check_min_version\(\"[^\"]+\"\)\s*$", re.MULTILINE), "check_min_version(\"VERSION\")\n"),
"init": (re.compile(r"^__version__\s+=\s+\"([^\"]+)\"\s*$", re.MULTILINE), "__version__ = \"VERSION\"\n"),
"setup": (re.compile(r"^(\s*)version\s*=\s*\"[^\"]+\",", re.MULTILINE), r"\1version=\"VERSION\","),
"doc": (re.compile(r"^(\s*)release\s*=\s*\"[^\"]+\"$", re.MULTILINE), "release = \"VERSION\"\n"),
}
UpperCamelCase__: Optional[int] = {
"init": "src/diffusers/__init__.py",
"setup": "setup.py",
}
UpperCamelCase__: List[Any] = "README.md"
def snake_case_ ( _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : int ) -> Optional[int]:
with open(_lowerCAmelCase , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
UpperCAmelCase : Optional[int] = f.read()
UpperCAmelCase , UpperCAmelCase : List[Any] = REPLACE_PATTERNS[pattern]
UpperCAmelCase : List[Any] = replace.replace('''VERSION''' , _lowerCAmelCase )
UpperCAmelCase : Optional[Any] = re_pattern.sub(_lowerCAmelCase , _lowerCAmelCase )
with open(_lowerCAmelCase , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f:
f.write(_lowerCAmelCase )
def snake_case_ ( _lowerCAmelCase : Any ) -> Optional[int]:
for folder, directories, fnames in os.walk(_lowerCAmelCase ):
# Removing some of the folders with non-actively maintained examples from the walk
if "research_projects" in directories:
directories.remove('''research_projects''' )
if "legacy" in directories:
directories.remove('''legacy''' )
for fname in fnames:
if fname.endswith('''.py''' ):
update_version_in_file(os.path.join(_lowerCAmelCase , _lowerCAmelCase ) , _lowerCAmelCase , pattern='''examples''' )
def snake_case_ ( _lowerCAmelCase : Any , _lowerCAmelCase : str=False ) -> List[str]:
for pattern, fname in REPLACE_FILES.items():
update_version_in_file(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
if not patch:
update_version_in_examples(_lowerCAmelCase )
def snake_case_ ( ) -> Optional[Any]:
UpperCAmelCase : Optional[int] = '''🤗 Transformers currently provides the following architectures'''
UpperCAmelCase : Optional[int] = '''1. Want to contribute a new model?'''
with open(_lowerCAmelCase , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
UpperCAmelCase : Optional[Any] = f.readlines()
# Find the start of the list.
UpperCAmelCase : List[Any] = 0
while not lines[start_index].startswith(_start_prompt ):
start_index += 1
start_index += 1
UpperCAmelCase : Optional[Any] = start_index
# Update the lines in the model list.
while not lines[index].startswith(_end_prompt ):
if lines[index].startswith('''1.''' ):
UpperCAmelCase : Optional[int] = lines[index].replace(
'''https://huggingface.co/docs/diffusers/main/model_doc''' , '''https://huggingface.co/docs/diffusers/model_doc''' , )
index += 1
with open(_lowerCAmelCase , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f:
f.writelines(_lowerCAmelCase )
def snake_case_ ( ) -> Optional[Any]:
with open(REPLACE_FILES['''init'''] , '''r''' ) as f:
UpperCAmelCase : Union[str, Any] = f.read()
UpperCAmelCase : int = REPLACE_PATTERNS['''init'''][0].search(_lowerCAmelCase ).groups()[0]
return packaging.version.parse(_lowerCAmelCase )
def snake_case_ ( _lowerCAmelCase : List[str]=False ) -> Any:
UpperCAmelCase : Optional[Any] = get_version()
if patch and default_version.is_devrelease:
raise ValueError('''Can\'t create a patch version from the dev branch, checkout a released version!''' )
if default_version.is_devrelease:
UpperCAmelCase : Optional[int] = default_version.base_version
elif patch:
UpperCAmelCase : Union[str, Any] = f"""{default_version.major}.{default_version.minor}.{default_version.micro + 1}"""
else:
UpperCAmelCase : Union[str, Any] = f"""{default_version.major}.{default_version.minor + 1}.0"""
# Now let's ask nicely if that's the right one.
UpperCAmelCase : Dict = input(f"""Which version are you releasing? [{default_version}]""" )
if len(_lowerCAmelCase ) == 0:
UpperCAmelCase : Tuple = default_version
print(f"""Updating version to {version}.""" )
global_version_update(_lowerCAmelCase , patch=_lowerCAmelCase )
def snake_case_ ( ) -> Any:
UpperCAmelCase : List[Any] = get_version()
UpperCAmelCase : List[str] = f"""{current_version.major}.{current_version.minor + 1}.0.dev0"""
UpperCAmelCase : List[Any] = current_version.base_version
# Check with the user we got that right.
UpperCAmelCase : Optional[int] = input(f"""Which version are we developing now? [{dev_version}]""" )
if len(_lowerCAmelCase ) == 0:
UpperCAmelCase : Dict = dev_version
print(f"""Updating version to {version}.""" )
global_version_update(_lowerCAmelCase )
# print("Cleaning main README, don't forget to run `make fix-copies`.")
# clean_main_ref_in_model_list()
if __name__ == "__main__":
UpperCamelCase__: Union[str, Any] = argparse.ArgumentParser()
parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
UpperCamelCase__: Optional[Any] = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print("Nothing to do after a patch :-)")
else:
post_release_work()
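# Illustrative sketch (not part of the original script): how the "init" pattern
# above rewrites a version line, assuming the patterns table is bound to the
# REPLACE_PATTERNS name referenced in the functions above. The sample line is
# hypothetical.
def replace_pattern_demo():
    re_pattern, replace = REPLACE_PATTERNS["init"]
    sample = '__version__ = "0.19.0.dev0"\n'
    # The MULTILINE-anchored pattern matches the old version line and the
    # template substitutes the new version in place.
    return re_pattern.sub(replace.replace("VERSION", "0.19.0"), sample)
    # -> '__version__ = "0.19.0"\n'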
"""simple docstring"""
import string
import numpy
def __UpperCAmelCase ( snake_case_ : int , snake_case_ : int ) -> int:
    return b if a == 0 else greatest_common_divisor(b % a , a )
class __lowerCamelCase :
__UpperCamelCase = string.ascii_uppercase + string.digits
# This cipher takes alphanumerics into account
# i.e. a total of 36 characters
# take x and return x % len(key_string)
    __UpperCamelCase = numpy.vectorize(lambda x : x % 36 )
    __UpperCamelCase = numpy.vectorize(lambda x : round(x ) )  # to_int: round entries to integers
def __init__(self , lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase = self.modulus(lowerCamelCase ) # mod36 calc's on the encrypt key
self.check_determinant() # validate the determinant of the encryption key
_lowerCAmelCase = encrypt_key.shape[0]
def A__ (self , lowerCamelCase ):
'''simple docstring'''
return self.key_string.index(lowerCamelCase )
def A__ (self , lowerCamelCase ):
'''simple docstring'''
return self.key_string[round(lowerCamelCase )]
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = round(numpy.linalg.det(self.encrypt_key ) )
if det < 0:
_lowerCAmelCase = det % len(self.key_string )
_lowerCAmelCase = len(self.key_string )
if greatest_common_divisor(lowerCamelCase , len(self.key_string ) ) != 1:
_lowerCAmelCase = (
f"""determinant modular {req_l} of encryption key({det}) """
f"""is not co prime w.r.t {req_l}.\nTry another key."""
)
raise ValueError(lowerCamelCase )
def A__ (self , lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase = [char for char in text.upper() if char in self.key_string]
_lowerCAmelCase = chars[-1]
while len(lowerCamelCase ) % self.break_key != 0:
chars.append(lowerCamelCase )
return "".join(lowerCamelCase )
def A__ (self , lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase = self.process_text(text.upper() )
_lowerCAmelCase = """"""
for i in range(0 , len(lowerCamelCase ) - self.break_key + 1 , self.break_key ):
_lowerCAmelCase = text[i : i + self.break_key]
_lowerCAmelCase = [self.replace_letters(lowerCamelCase ) for char in batch]
_lowerCAmelCase = numpy.array([vec] ).T
_lowerCAmelCase = self.modulus(self.encrypt_key.dot(lowerCamelCase ) ).T.tolist()[
0
]
_lowerCAmelCase = """""".join(
self.replace_digits(lowerCamelCase ) for num in batch_encrypted )
encrypted += encrypted_batch
return encrypted
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = round(numpy.linalg.det(self.encrypt_key ) )
if det < 0:
_lowerCAmelCase = det % len(self.key_string )
_lowerCAmelCase = None
for i in range(len(self.key_string ) ):
if (det * i) % len(self.key_string ) == 1:
_lowerCAmelCase = i
break
_lowerCAmelCase = (
det_inv
* numpy.linalg.det(self.encrypt_key )
* numpy.linalg.inv(self.encrypt_key )
)
return self.to_int(self.modulus(lowerCamelCase ) )
def A__ (self , lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase = self.make_decrypt_key()
_lowerCAmelCase = self.process_text(text.upper() )
_lowerCAmelCase = """"""
for i in range(0 , len(lowerCamelCase ) - self.break_key + 1 , self.break_key ):
_lowerCAmelCase = text[i : i + self.break_key]
_lowerCAmelCase = [self.replace_letters(lowerCamelCase ) for char in batch]
_lowerCAmelCase = numpy.array([vec] ).T
_lowerCAmelCase = self.modulus(decrypt_key.dot(lowerCamelCase ) ).T.tolist()[0]
_lowerCAmelCase = """""".join(
self.replace_digits(lowerCamelCase ) for num in batch_decrypted )
decrypted += decrypted_batch
return decrypted
def __UpperCAmelCase ( ) -> None:
_lowerCAmelCase = int(input("""Enter the order of the encryption key: """ ) )
_lowerCAmelCase = []
print("""Enter each row of the encryption key with space separated integers""" )
for _ in range(snake_case_ ):
_lowerCAmelCase = [int(snake_case_ ) for x in input().split()]
hill_matrix.append(snake_case_ )
_lowerCAmelCase = HillCipher(numpy.array(snake_case_ ) )
print("""Would you like to encrypt or decrypt some text? (1 or 2)""" )
_lowerCAmelCase = input("""\n1. Encrypt\n2. Decrypt\n""" )
if option == "1":
_lowerCAmelCase = input("""What text would you like to encrypt?: """ )
print("""Your encrypted text is:""" )
print(hc.encrypt(snake_case_ ) )
elif option == "2":
_lowerCAmelCase = input("""What text would you like to decrypt?: """ )
print("""Your decrypted text is:""" )
print(hc.decrypt(snake_case_ ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
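# Minimal usage sketch (not part of the original module), assuming the class and
# methods keep the names used in main() above (HillCipher, encrypt, decrypt).
# The 2x2 key has determinant 7, which is coprime with 36, so it passes the
# determinant check; key and plaintext are hypothetical.
def hill_cipher_demo():
    key = numpy.array([[2, 5], [1, 6]])
    hc = HillCipher(key)
    ciphertext = hc.encrypt("HELLO")  # padded to a multiple of break_key (2)
    return ciphertext, hc.decrypt(ciphertext)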
"""simple docstring"""
def __UpperCAmelCase ( snake_case_ : int = 1000000 ) -> int:
"""simple docstring"""
_lowerCAmelCase = limit + 1
_lowerCAmelCase = [0] * limit
for first_term in range(1 , snake_case_ ):
for n in range(snake_case_ , snake_case_ , snake_case_ ):
_lowerCAmelCase = first_term + n / first_term
            if common_difference % 4:  # a + n / a (= 4d) must be divisible by 4
continue
else:
common_difference /= 4
if (
first_term > common_difference
and first_term < 4 * common_difference
): # since x,y,z are positive integers
                    frequency[n] += 1  # z > 0 requires a > d, and n > 0 requires a < 4d
_lowerCAmelCase = sum(1 for x in frequency[1:limit] if x == 10 )
return count
if __name__ == "__main__":
print(F'{solution() = }')
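# Worked derivation (not part of the original solution): for an arithmetic
# progression x = a + d, y = a, z = a - d,
#   x**2 - y**2 - z**2 = (a + d)**2 - a**2 - (a - d)**2 = a * (4 * d - a) = n,
# so 4 * d = a + n / a, which is what the loop body computes before the
# divisibility-by-4 check. An equivalent integer-only enumeration, as a sketch:
def brute_force(limit: int = 1_000_000) -> int:
    frequency = [0] * limit
    for a in range(1, limit):
        # positivity constraints: z > 0 needs a > d, and n > 0 needs a < 4 * d
        for d in range(a // 4 + 1, a):
            n = a * (4 * d - a)
            if n >= limit:
                break  # n grows with d, so larger d only overshoot further
            frequency[n] += 1
    return sum(1 for x in frequency[1:] if x == 10)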
"""simple docstring"""
from functools import lru_cache
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ) -> set:
lowercase__: Optional[int] = 2
lowercase__: List[str] = set()
while i * i <= n:
if n % i:
i += 1
else:
n //= i
            factors.add(i )  # the prime divisor just divided out
if n > 1:
        factors.add(n )  # whatever remains is itself prime
return factors
@lru_cache
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ) -> int:
return len(unique_prime_factors(__UpperCAmelCase ) )
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ) -> bool:
return len(set(__UpperCAmelCase ) ) in (0, 1)
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ) -> list:
lowercase__: Tuple = 2
while True:
# Increment each value of a generated range
lowercase__: int = [base + i for i in range(__UpperCAmelCase )]
        # Run elements through our unique_prime_factors function
# Append our target number to the end.
        lowercase__: List[Any] = [upf_len(x ) for x in group]
checker.append(__UpperCAmelCase )
# If all numbers in the list are equal, return the group variable.
if equality(__UpperCAmelCase ):
return group
# Increment our base variable by 1
base += 1
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase = 4 ) -> int:
lowercase__: Tuple = run(__UpperCAmelCase )
    return results[0] if len(results ) else None
if __name__ == "__main__":
print(solution())
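# Usage sketch (not part of the original solution), assuming the helpers keep
# the names referenced inside the module (unique_prime_factors, upf_len,
# equality, run). The values come from the Project Euler 47 statement:
# 644 = 2**2 * 7 * 23, 645 = 3 * 5 * 43 and 646 = 2 * 17 * 19 are the first
# three consecutive numbers with three distinct prime factors each.
def run_demo():
    assert unique_prime_factors(644) == {2, 7, 23}
    assert upf_len(645) == 3
    assert equality([3, 3, 3])
    return run(3)  # expected to return [644, 645, 646]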
"""simple docstring"""
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionTextToImagePipeline
from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device
__A = False
class UpperCAmelCase (unittest.TestCase ):
"""simple docstring"""
pass
@nightly
@require_torch_gpu
class UpperCAmelCase (unittest.TestCase ):
"""simple docstring"""
def _snake_case ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _snake_case ( self ):
lowercase__: Dict = VersatileDiffusionTextToImagePipeline.from_pretrained('''shi-labs/versatile-diffusion''' )
# remove text_unet
pipe.remove_unused_weights()
pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
lowercase__: List[str] = '''A painting of a squirrel eating a burger '''
lowercase__: str = torch.manual_seed(0 )
lowercase__: Union[str, Any] = pipe(
prompt=_UpperCAmelCase , generator=_UpperCAmelCase , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(_UpperCAmelCase )
lowercase__: Optional[Any] = VersatileDiffusionTextToImagePipeline.from_pretrained(_UpperCAmelCase )
pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
lowercase__: Optional[int] = generator.manual_seed(0 )
lowercase__: List[str] = pipe(
prompt=_UpperCAmelCase , generator=_UpperCAmelCase , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' ).images
assert np.abs(image - new_image ).sum() < 1e-5, "Models don't have the same forward pass"
def _snake_case ( self ):
lowercase__: Dict = VersatileDiffusionTextToImagePipeline.from_pretrained(
'''shi-labs/versatile-diffusion''' , torch_dtype=torch.floataa )
pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
lowercase__: Tuple = '''A painting of a squirrel eating a burger '''
lowercase__: Optional[Any] = torch.manual_seed(0 )
lowercase__: Tuple = pipe(
prompt=_UpperCAmelCase , generator=_UpperCAmelCase , guidance_scale=7.5 , num_inference_steps=50 , output_type='''numpy''' ).images
lowercase__: Union[str, Any] = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
lowercase__: Any = np.array([0.3_367, 0.3_169, 0.2_656, 0.3_870, 0.4_790, 0.3_796, 0.4_009, 0.4_878, 0.4_778] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
'''simple docstring'''
import argparse
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
CLIPTokenizer,
CLIPTokenizerFast,
VideoMAEImageProcessor,
XCLIPConfig,
XCLIPModel,
XCLIPProcessor,
XCLIPTextConfig,
XCLIPVisionConfig,
)
def _a ( _lowerCamelCase , _lowerCamelCase ) -> Union[str, Any]:
"""simple docstring"""
__snake_case : int = XCLIPTextConfig()
# derive patch size from model name
__snake_case : Any = model_name.find("""patch""" )
__snake_case : int = int(model_name[start_idx + len("""patch""" ) : start_idx + len("""patch""" ) + 2] )
__snake_case : str = XCLIPVisionConfig(patch_size=_lowerCamelCase , num_frames=_lowerCamelCase )
if "large" in model_name:
__snake_case : Optional[Any] = 768
__snake_case : Any = 3072
__snake_case : Dict = 12
__snake_case : Tuple = 1024
__snake_case : Optional[int] = 4096
__snake_case : Dict = 16
__snake_case : List[Any] = 24
__snake_case : Dict = 768
__snake_case : Optional[int] = 3072
if model_name == "xclip-large-patch14-16-frames":
__snake_case : Optional[int] = 336
__snake_case : Union[str, Any] = XCLIPConfig.from_text_vision_configs(_lowerCamelCase , _lowerCamelCase )
if "large" in model_name:
__snake_case : List[Any] = 768
return config
def _a ( _lowerCamelCase ) -> Union[str, Any]:
"""simple docstring"""
if name == "token_embedding.weight":
__snake_case : Union[str, Any] = name.replace("""token_embedding.weight""" , """text_model.embeddings.token_embedding.weight""" )
if name == "positional_embedding":
__snake_case : Dict = name.replace("""positional_embedding""" , """text_model.embeddings.position_embedding.weight""" )
if "ln_1" in name:
__snake_case : Tuple = name.replace("""ln_1""" , """layer_norm1""" )
if "ln_2" in name:
__snake_case : List[str] = name.replace("""ln_2""" , """layer_norm2""" )
if "c_fc" in name:
__snake_case : str = name.replace("""c_fc""" , """fc1""" )
if "c_proj" in name:
__snake_case : Optional[Any] = name.replace("""c_proj""" , """fc2""" )
if name.startswith("""transformer.resblocks""" ):
__snake_case : List[str] = name.replace("""transformer.resblocks""" , """text_model.encoder.layers""" )
if "attn.out_proj" in name and "message" not in name:
__snake_case : int = name.replace("""attn.out_proj""" , """self_attn.out_proj""" )
if "ln_final" in name:
__snake_case : List[Any] = name.replace("""ln_final""" , """text_model.final_layer_norm""" )
# visual encoder
if name == "visual.class_embedding":
__snake_case : str = name.replace("""visual.class_embedding""" , """vision_model.embeddings.class_embedding""" )
if name == "visual.positional_embedding":
__snake_case : Optional[int] = name.replace("""visual.positional_embedding""" , """vision_model.embeddings.position_embedding.weight""" )
if name.startswith("""visual.transformer.resblocks""" ):
__snake_case : Optional[int] = name.replace("""visual.transformer.resblocks""" , """vision_model.encoder.layers""" )
if "visual.conv1" in name:
__snake_case : List[str] = name.replace("""visual.conv1""" , """vision_model.embeddings.patch_embedding""" )
if "visual.ln_pre" in name:
__snake_case : List[Any] = name.replace("""visual.ln_pre""" , """vision_model.pre_layernorm""" )
if "visual.ln_post" in name:
__snake_case : List[Any] = name.replace("""visual.ln_post""" , """vision_model.post_layernorm""" )
if "visual.proj" in name:
__snake_case : List[Any] = name.replace("""visual.proj""" , """visual_projection.weight""" )
if "text_projection" in name:
__snake_case : str = name.replace("""text_projection""" , """text_projection.weight""" )
# things on top
if "prompts_visual_proj" in name:
__snake_case : str = name.replace("""prompts_visual_proj""" , """prompts_visual_projection""" )
if "prompts_visual_ln" in name:
__snake_case : str = name.replace("""prompts_visual_ln""" , """prompts_visual_layernorm""" )
# mit
if name == "mit.positional_embedding":
__snake_case : Tuple = name.replace("""positional""" , """position""" )
if name.startswith("""mit.resblocks""" ):
__snake_case : Any = name.replace("""mit.resblocks""" , """mit.encoder.layers""" )
# prompts generator
if name.startswith("""prompts_generator.norm""" ):
__snake_case : Union[str, Any] = name.replace("""prompts_generator.norm""" , """prompts_generator.layernorm""" )
return name
def _a ( _lowerCamelCase , _lowerCamelCase ) -> List[Any]:
"""simple docstring"""
for key in orig_state_dict.copy().keys():
__snake_case : int = orig_state_dict.pop(_lowerCamelCase )
if "attn.in_proj" in key:
__snake_case : List[str] = key.split(""".""" )
if key.startswith("""visual""" ):
__snake_case : Optional[int] = key_split[3]
__snake_case : Any = config.vision_config.hidden_size
if "message_attn" in key:
if "weight" in key:
__snake_case : Optional[Any] = val[
:dim, :
]
__snake_case : str = val[
dim : dim * 2, :
]
__snake_case : List[str] = val[
-dim:, :
]
else:
__snake_case : Tuple = val[
:dim
]
__snake_case : Any = val[
dim : dim * 2
]
__snake_case : str = val[
-dim:
]
else:
if "weight" in key:
__snake_case : List[Any] = val[
:dim, :
]
__snake_case : str = val[
dim : dim * 2, :
]
__snake_case : List[Any] = val[
-dim:, :
]
else:
__snake_case : str = val[:dim]
__snake_case : Optional[int] = val[
dim : dim * 2
]
__snake_case : Dict = val[-dim:]
elif key.startswith("""mit""" ):
__snake_case : int = key_split[2]
__snake_case : Optional[Any] = config.vision_config.mit_hidden_size
if "weight" in key:
__snake_case : List[str] = val[:dim, :]
__snake_case : Dict = val[dim : dim * 2, :]
__snake_case : Optional[Any] = val[-dim:, :]
else:
__snake_case : Optional[Any] = val[:dim]
__snake_case : Optional[Any] = val[dim : dim * 2]
__snake_case : List[Any] = val[-dim:]
else:
__snake_case : Tuple = key_split[2]
__snake_case : Union[str, Any] = config.text_config.hidden_size
if "weight" in key:
__snake_case : Union[str, Any] = val[:dim, :]
__snake_case : Union[str, Any] = val[
dim : dim * 2, :
]
__snake_case : Optional[Any] = val[-dim:, :]
else:
__snake_case : List[str] = val[:dim]
__snake_case : Tuple = val[
dim : dim * 2
]
__snake_case : Any = val[-dim:]
else:
__snake_case : Optional[int] = rename_key(_lowerCamelCase )
if new_key_name in ["visual_projection.weight", "text_projection.weight"]:
__snake_case : Dict = val.T
__snake_case : List[Any] = val
return orig_state_dict
def _a ( _lowerCamelCase ) -> Optional[Any]:
"""simple docstring"""
if num_frames == 8:
__snake_case : Optional[Any] = """eating_spaghetti_8_frames.npy"""
elif num_frames == 16:
__snake_case : Tuple = """eating_spaghetti.npy"""
elif num_frames == 32:
__snake_case : Union[str, Any] = """eating_spaghetti_32_frames.npy"""
__snake_case : List[Any] = hf_hub_download(
repo_id="""hf-internal-testing/spaghetti-video""" , filename=_lowerCamelCase , repo_type="""dataset""" , )
__snake_case : Dict = np.load(_lowerCamelCase )
return list(_lowerCamelCase )
def _a ( _lowerCamelCase , _lowerCamelCase=None , _lowerCamelCase=False ) -> str:
"""simple docstring"""
__snake_case : Optional[int] = {
# fully supervised kinetics-400 checkpoints
"""xclip-base-patch32""": """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth""",
"""xclip-base-patch32-16-frames""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth"""
),
"""xclip-base-patch16""": """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth""",
"""xclip-base-patch16-16-frames""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth"""
),
"""xclip-large-patch14""": """https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&export=download&confirm=t&uuid=b26caedc-88e2-473e-830a-9d158b653cdb""",
"""xclip-large-patch14-16-frames""": """https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&export=download&confirm=t&uuid=538fa810-e671-4050-b385-9a623f89804f""",
# fully supervised kinetics-600 checkpoints
"""xclip-base-patch16-kinetics-600""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth"""
),
"""xclip-base-patch16-kinetics-600-16-frames""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth"""
),
"""xclip-large-patch14-kinetics-600""": """https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&export=download&confirm=t&uuid=141d4977-4a65-44ae-864f-4b0c19f838be""",
# few shot
"""xclip-base-patch16-hmdb-2-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth"""
),
"""xclip-base-patch16-hmdb-4-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth"""
),
"""xclip-base-patch16-hmdb-8-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth"""
),
"""xclip-base-patch16-hmdb-16-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth"""
),
"""xclip-base-patch16-ucf-2-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth"""
),
"""xclip-base-patch16-ucf-4-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth"""
),
"""xclip-base-patch16-ucf-8-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth"""
),
"""xclip-base-patch16-ucf-16-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth"""
),
# zero shot
"""xclip-base-patch16-zero-shot""": """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth""",
}
__snake_case : Tuple = model_to_url[model_name]
__snake_case : Dict = 8
if "16-frames" in model_name:
__snake_case : List[Any] = 16
elif "shot" in model_name:
__snake_case : Optional[int] = 32
__snake_case : List[Any] = get_xclip_config(_lowerCamelCase , _lowerCamelCase )
__snake_case : List[str] = XCLIPModel(_lowerCamelCase )
model.eval()
if "drive" in checkpoint_url:
__snake_case : int = """pytorch_model.bin"""
gdown.cached_download(_lowerCamelCase , _lowerCamelCase , quiet=_lowerCamelCase )
__snake_case : List[str] = torch.load(_lowerCamelCase , map_location="""cpu""" )["""model"""]
else:
__snake_case : Optional[int] = torch.hub.load_state_dict_from_url(_lowerCamelCase )["""model"""]
__snake_case : str = convert_state_dict(_lowerCamelCase , _lowerCamelCase )
__snake_case : Optional[Any] = XCLIPModel(_lowerCamelCase )
__snake_case , __snake_case : Union[str, Any] = model.load_state_dict(_lowerCamelCase , strict=_lowerCamelCase )
assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"]
model.eval()
__snake_case : List[str] = 336 if model_name == """xclip-large-patch14-16-frames""" else 224
__snake_case : str = VideoMAEImageProcessor(size=_lowerCamelCase )
__snake_case : Optional[Any] = CLIPTokenizer.from_pretrained("""openai/clip-vit-base-patch32""" )
__snake_case : int = CLIPTokenizerFast.from_pretrained("""openai/clip-vit-base-patch32""" )
__snake_case : Tuple = XCLIPProcessor(image_processor=_lowerCamelCase , tokenizer=_lowerCamelCase )
__snake_case : Tuple = prepare_video(_lowerCamelCase )
__snake_case : Optional[Any] = processor(
text=["""playing sports""", """eating spaghetti""", """go shopping"""] , videos=_lowerCamelCase , return_tensors="""pt""" , padding=_lowerCamelCase )
print("""Shape of pixel values:""" , inputs.pixel_values.shape )
with torch.no_grad():
__snake_case : Dict = model(**_lowerCamelCase )
# Verify outputs
__snake_case : Tuple = outputs.logits_per_video
__snake_case : int = logits_per_video.softmax(dim=1 )
print("""Probs:""" , _lowerCamelCase )
# kinetics-400
if model_name == "xclip-base-patch32":
__snake_case : Optional[Any] = torch.tensor([[0.00_19, 0.99_51, 0.00_30]] )
elif model_name == "xclip-base-patch32-16-frames":
__snake_case : str = torch.tensor([[7.09_99E-04, 9.98_83E-01, 4.55_80E-04]] )
elif model_name == "xclip-base-patch16":
__snake_case : Dict = torch.tensor([[0.00_83, 0.96_81, 0.02_36]] )
elif model_name == "xclip-base-patch16-16-frames":
__snake_case : str = torch.tensor([[7.69_37E-04, 9.97_28E-01, 1.94_73E-03]] )
elif model_name == "xclip-large-patch14":
__snake_case : Union[str, Any] = torch.tensor([[0.00_62, 0.98_64, 0.00_75]] )
elif model_name == "xclip-large-patch14-16-frames":
__snake_case : Tuple = torch.tensor([[3.38_77E-04, 9.99_37E-01, 2.88_88E-04]] )
# kinetics-600
elif model_name == "xclip-base-patch16-kinetics-600":
__snake_case : Dict = torch.tensor([[0.05_55, 0.89_14, 0.05_31]] )
elif model_name == "xclip-base-patch16-kinetics-600-16-frames":
__snake_case : int = torch.tensor([[3.85_54E-04, 9.99_29E-01, 3.27_54E-04]] )
elif model_name == "xclip-large-patch14-kinetics-600":
__snake_case : Optional[int] = torch.tensor([[0.00_36, 0.99_20, 0.00_45]] )
# few shot
elif model_name == "xclip-base-patch16-hmdb-2-shot":
__snake_case : str = torch.tensor([[7.18_90E-06, 9.99_94E-01, 5.65_59E-05]] )
elif model_name == "xclip-base-patch16-hmdb-4-shot":
__snake_case : Tuple = torch.tensor([[1.03_20E-05, 9.99_93E-01, 6.24_35E-05]] )
elif model_name == "xclip-base-patch16-hmdb-8-shot":
__snake_case : Optional[Any] = torch.tensor([[4.13_77E-06, 9.99_90E-01, 9.83_86E-05]] )
elif model_name == "xclip-base-patch16-hmdb-16-shot":
__snake_case : Optional[Any] = torch.tensor([[4.13_47E-05, 9.99_62E-01, 3.34_11E-04]] )
elif model_name == "xclip-base-patch16-ucf-2-shot":
__snake_case : str = torch.tensor([[8.58_57E-05, 9.99_28E-01, 6.32_91E-04]] )
elif model_name == "xclip-base-patch16-ucf-4-shot":
__snake_case : Any = torch.tensor([[8.58_57E-05, 9.99_28E-01, 6.32_91E-04]] )
elif model_name == "xclip-base-patch16-ucf-8-shot":
__snake_case : Tuple = torch.tensor([[0.00_27, 0.99_04, 0.00_70]] )
elif model_name == "xclip-base-patch16-ucf-16-shot":
__snake_case : Optional[Any] = torch.tensor([[9.82_19E-04, 9.95_93E-01, 3.08_63E-03]] )
# zero shot
elif model_name == "xclip-base-patch16-zero-shot":
__snake_case : Dict = torch.tensor([[3.50_82E-04, 9.97_85E-01, 1.79_66E-03]] )
else:
raise ValueError(F'''Model name {model_name} not supported''' )
assert torch.allclose(_lowerCamelCase , _lowerCamelCase , atol=1E-3 )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(_lowerCamelCase )
if push_to_hub:
print("""Pushing model, processor and slow tokenizer files to the hub...""" )
model.push_to_hub(_lowerCamelCase , organization="""nielsr""" )
processor.push_to_hub(_lowerCamelCase , organization="""nielsr""" )
slow_tokenizer.push_to_hub(_lowerCamelCase , organization="""nielsr""" )
if __name__ == "__main__":
__UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="xclip-base-patch32",
type=str,
help="Name of the model.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
__UpperCamelCase = parser.parse_args()
convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
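# Example invocation (not part of the original script; the script filename and
# output path below are assumptions):
#
#   python convert_x_clip_original_pytorch_to_hf.py \
#       --model_name xclip-base-patch32 \
#       --pytorch_dump_folder_path /tmp/xclip-base-patch32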
'''simple docstring'''
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
__UpperCamelCase = logging.get_logger(__name__)
__UpperCamelCase = {
"Salesforce/codegen-350M-nl": "https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json",
"Salesforce/codegen-350M-multi": "https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json",
"Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json",
"Salesforce/codegen-2B-nl": "https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json",
"Salesforce/codegen-2B-multi": "https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json",
"Salesforce/codegen-2B-mono": "https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json",
"Salesforce/codegen-6B-nl": "https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json",
"Salesforce/codegen-6B-multi": "https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json",
"Salesforce/codegen-6B-mono": "https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json",
"Salesforce/codegen-16B-nl": "https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json",
"Salesforce/codegen-16B-multi": "https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json",
"Salesforce/codegen-16B-mono": "https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json",
}
class _A ( __lowercase ):
lowercase__: str = '''codegen'''
lowercase__: Optional[int] = {
'''max_position_embeddings''': '''n_positions''',
'''hidden_size''': '''n_embd''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
def __init__( self : Union[str, Any] , __magic_name__ : Optional[Any]=5_04_00 , __magic_name__ : Any=20_48 , __magic_name__ : List[str]=20_48 , __magic_name__ : Union[str, Any]=40_96 , __magic_name__ : Tuple=28 , __magic_name__ : Dict=16 , __magic_name__ : List[str]=64 , __magic_name__ : str=None , __magic_name__ : Tuple="gelu_new" , __magic_name__ : Tuple=0.0 , __magic_name__ : Tuple=0.0 , __magic_name__ : Dict=0.0 , __magic_name__ : Optional[Any]=1E-5 , __magic_name__ : int=0.02 , __magic_name__ : List[Any]=True , __magic_name__ : int=5_02_56 , __magic_name__ : int=5_02_56 , __magic_name__ : Any=False , **__magic_name__ : Optional[int] , ) -> int:
"""simple docstring"""
__snake_case : List[str] = vocab_size
__snake_case : Union[str, Any] = n_ctx
__snake_case : int = n_positions
__snake_case : str = n_embd
__snake_case : Dict = n_layer
__snake_case : List[Any] = n_head
__snake_case : Any = n_inner
__snake_case : str = rotary_dim
__snake_case : List[str] = activation_function
__snake_case : Tuple = resid_pdrop
__snake_case : Dict = embd_pdrop
__snake_case : int = attn_pdrop
__snake_case : Tuple = layer_norm_epsilon
__snake_case : Union[str, Any] = initializer_range
__snake_case : Optional[Any] = use_cache
__snake_case : Dict = bos_token_id
__snake_case : Union[str, Any] = eos_token_id
super().__init__(
bos_token_id=__magic_name__ , eos_token_id=__magic_name__ , tie_word_embeddings=__magic_name__ , **__magic_name__ )
class _A ( __lowercase ):
def __init__( self : int , __magic_name__ : PretrainedConfig , __magic_name__ : str = "default" , __magic_name__ : List[PatchingSpec] = None , __magic_name__ : bool = False , ) -> Tuple:
"""simple docstring"""
super().__init__(__magic_name__ , task=__magic_name__ , patching_specs=__magic_name__ , use_past=__magic_name__ )
if not getattr(self._config , """pad_token_id""" , __magic_name__ ):
# TODO: how to do that better?
__snake_case : List[str] = 0
@property
def lowercase__ ( self : Tuple ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
__snake_case : Dict = OrderedDict({"""input_ids""": {0: """batch""", 1: """sequence"""}} )
if self.use_past:
self.fill_with_past_key_values_(__magic_name__ , direction="""inputs""" )
__snake_case : Optional[Any] = {0: """batch""", 1: """past_sequence + sequence"""}
else:
__snake_case : Union[str, Any] = {0: """batch""", 1: """sequence"""}
return common_inputs
@property
def lowercase__ ( self : Tuple ) -> int:
"""simple docstring"""
return self._config.n_layer
@property
def lowercase__ ( self : Union[str, Any] ) -> int:
"""simple docstring"""
return self._config.n_head
def lowercase__ ( self : Dict , __magic_name__ : PreTrainedTokenizer , __magic_name__ : int = -1 , __magic_name__ : int = -1 , __magic_name__ : bool = False , __magic_name__ : Optional[TensorType] = None , ) -> Mapping[str, Any]:
"""simple docstring"""
__snake_case : Tuple = super(__magic_name__ , self ).generate_dummy_inputs(
__magic_name__ , batch_size=__magic_name__ , seq_length=__magic_name__ , is_pair=__magic_name__ , framework=__magic_name__ )
# We need to order the input in the way they appears in the forward()
__snake_case : Union[str, Any] = OrderedDict({"""input_ids""": common_inputs["""input_ids"""]} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
else:
import torch
__snake_case , __snake_case : str = common_inputs["""input_ids"""].shape
# Not using the same length for past_key_values
__snake_case : Tuple = seqlen + 2
__snake_case : Union[str, Any] = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
__snake_case : List[str] = [
(torch.zeros(__magic_name__ ), torch.zeros(__magic_name__ )) for _ in range(self.num_layers )
]
__snake_case : Optional[int] = common_inputs["""attention_mask"""]
if self.use_past:
__snake_case : Union[str, Any] = ordered_inputs["""attention_mask"""].dtype
__snake_case : Optional[Any] = torch.cat(
[ordered_inputs["""attention_mask"""], torch.ones(__magic_name__ , __magic_name__ , dtype=__magic_name__ )] , dim=1 )
return ordered_inputs
@property
def lowercase__ ( self : Union[str, Any] ) -> int:
"""simple docstring"""
return 13
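# Illustrative note (not part of the original file): shapes of the dummy
# past_key_values built above. Each of the n_layer entries is a (key, value)
# pair of zero tensors shaped (batch, n_head, seqlen + 2, n_embd // n_head),
# and the attention mask is extended by the same past length. With the ONNX
# utilities' usual fixed fallbacks of batch_size=2 and seq_length=8 (an
# assumption here) and the CodeGen defaults above, that is:
#
#   (2, 16, 10, 4096 // 16) == (2, 16, 10, 256)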
from __future__ import annotations
snake_case : Any = list[list[int]]
# assigning initial values to the grid
snake_case : Matrix = [
[3, 0, 6, 5, 0, 8, 4, 0, 0],
[5, 2, 0, 0, 0, 0, 0, 0, 0],
[0, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
# a grid with no solution
snake_case : Matrix = [
[5, 0, 6, 5, 0, 8, 4, 0, 3],
[5, 2, 0, 0, 0, 0, 0, 0, 2],
[1, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def __lowerCamelCase ( UpperCAmelCase_ : Matrix , UpperCAmelCase_ : int , UpperCAmelCase_ : int , UpperCAmelCase_ : int ):
"""simple docstring"""
for i in range(9 ):
if grid[row][i] == n or grid[i][column] == n:
return False
for i in range(3 ):
for j in range(3 ):
if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
return False
return True
def __lowerCamelCase ( UpperCAmelCase_ : Matrix ):
"""simple docstring"""
for i in range(9 ):
for j in range(9 ):
if grid[i][j] == 0:
return i, j
return None
def __lowerCamelCase ( UpperCAmelCase_ : Matrix ):
"""simple docstring"""
if location := find_empty_location(UpperCAmelCase_ ):
a , a :Dict = location
else:
# If the location is ``None``, then the grid is solved.
return grid
for digit in range(1 , 10 ):
if is_safe(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ):
a :Optional[Any] = digit
if sudoku(UpperCAmelCase_ ) is not None:
return grid
a :Optional[int] = 0
return None
def __lowerCamelCase ( UpperCAmelCase_ : Matrix ):
"""simple docstring"""
for row in grid:
for cell in row:
print(UpperCAmelCase_ , end=''' ''' )
print()
if __name__ == "__main__":
# make a copy of grid so that you can compare with the unmodified grid
for example_grid in (initial_grid, no_solution):
print('''\nExample grid:\n''' + '''=''' * 20)
print_solution(example_grid)
print('''\nExample grid solution:''')
snake_case : Optional[Any] = sudoku(example_grid)
if solution is not None:
print_solution(solution)
else:
print('''Cannot find a solution.''')
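# Quick check sketch (not part of the original module), assuming the helpers
# keep the names used in the __main__ block (is_safe, initial_grid). For the
# first grid above, cell (0, 1) is empty; a 3 there clashes with the 3 already
# in row 0, while a 1 has no conflict in its row, column, or 3x3 box:
#
#   is_safe(initial_grid, 0, 1, 3)  # -> False
#   is_safe(initial_grid, 0, 1, 1)  # -> True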
import os
import textwrap
import pyarrow as pa
import pytest
from datasets import ClassLabel, Features, Image
from datasets.packaged_modules.csv.csv import Csv
from ..utils import require_pil
@pytest.fixture
def _a ( UpperCAmelCase ) -> str:
"""simple docstring"""
lowerCamelCase__ : int = tmp_path / '''file.csv'''
lowerCamelCase__ : Tuple = textwrap.dedent(
'''\
header1,header2
1,2
10,20
''' )
with open(UpperCAmelCase , '''w''' ) as f:
f.write(UpperCAmelCase )
return str(UpperCAmelCase )
@pytest.fixture
def _a ( UpperCAmelCase ) -> Optional[Any]:
"""simple docstring"""
lowerCamelCase__ : Any = tmp_path / '''malformed_file.csv'''
lowerCamelCase__ : List[str] = textwrap.dedent(
'''\
header1,header2
1,2
10,20,
''' )
with open(UpperCAmelCase , '''w''' ) as f:
f.write(UpperCAmelCase )
return str(UpperCAmelCase )
@pytest.fixture
def _a ( UpperCAmelCase , UpperCAmelCase ) -> List[str]:
"""simple docstring"""
lowerCamelCase__ : Dict = tmp_path / '''csv_with_image.csv'''
lowerCamelCase__ : int = textwrap.dedent(
f"\\n image\n {image_file}\n " )
with open(UpperCAmelCase , '''w''' ) as f:
f.write(UpperCAmelCase )
return str(UpperCAmelCase )
@pytest.fixture
def _a ( UpperCAmelCase ) -> Any:
"""simple docstring"""
lowerCamelCase__ : Union[str, Any] = tmp_path / '''csv_with_label.csv'''
lowerCamelCase__ : List[Any] = textwrap.dedent(
'''\
label
good
bad
good
''' )
with open(UpperCAmelCase , '''w''' ) as f:
f.write(UpperCAmelCase )
return str(UpperCAmelCase )
@pytest.fixture
def _a ( UpperCAmelCase ) -> Any:
"""simple docstring"""
lowerCamelCase__ : int = tmp_path / '''csv_with_int_list.csv'''
lowerCamelCase__ : Dict = textwrap.dedent(
'''\
int_list
1 2 3
4 5 6
7 8 9
''' )
with open(UpperCAmelCase , '''w''' ) as f:
f.write(UpperCAmelCase )
return str(UpperCAmelCase )
def _a ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Any:
"""simple docstring"""
lowerCamelCase__ : Union[str, Any] = Csv()
lowerCamelCase__ : List[Any] = csv._generate_tables([[csv_file, malformed_csv_file]] )
    with pytest.raises(ValueError , match='''Error tokenizing data''' ):
for _ in generator:
pass
assert any(
record.levelname == '''ERROR'''
and '''Failed to read file''' in record.message
and os.path.basename(UpperCAmelCase ) in record.message
for record in caplog.records )
@require_pil
def _a ( UpperCAmelCase ) -> Optional[Any]:
"""simple docstring"""
with open(UpperCAmelCase , encoding='''utf-8''' ) as f:
lowerCamelCase__ : Tuple = f.read().splitlines()[1]
lowerCamelCase__ : Any = Csv(encoding='''utf-8''' , features=Features({'''image''': Image()} ) )
lowerCamelCase__ : List[str] = csv._generate_tables([[csv_file_with_image]] )
lowerCamelCase__ : Dict = pa.concat_tables([table for _, table in generator] )
assert pa_table.schema.field('''image''' ).type == Image()()
lowerCamelCase__ : Tuple = pa_table.to_pydict()['''image''']
assert generated_content == [{"path": image_file, "bytes": None}]
def _a ( UpperCAmelCase ) -> List[Any]:
"""simple docstring"""
with open(UpperCAmelCase , encoding='''utf-8''' ) as f:
lowerCamelCase__ : List[Any] = f.read().splitlines()[1:]
lowerCamelCase__ : List[Any] = Csv(encoding='''utf-8''' , features=Features({'''label''': ClassLabel(names=['''good''', '''bad'''] )} ) )
lowerCamelCase__ : Optional[Any] = csv._generate_tables([[csv_file_with_label]] )
lowerCamelCase__ : Tuple = pa.concat_tables([table for _, table in generator] )
assert pa_table.schema.field('''label''' ).type == ClassLabel(names=['''good''', '''bad'''] )()
lowerCamelCase__ : str = pa_table.to_pydict()['''label''']
    assert generated_content == [ClassLabel(names=['''good''', '''bad'''] ).str2int(label ) for label in labels]
def _a ( UpperCAmelCase ) -> Any:
"""simple docstring"""
    lowerCamelCase__ : List[str] = Csv(encoding='''utf-8''' , sep=''',''' , converters={'''int_list''': lambda x : [int(i ) for i in x.split()]} )
lowerCamelCase__ : Optional[Any] = csv._generate_tables([[csv_file_with_int_list]] )
lowerCamelCase__ : Tuple = pa.concat_tables([table for _, table in generator] )
assert pa.types.is_list(pa_table.schema.field('''int_list''' ).type )
lowerCamelCase__ : Tuple = pa_table.to_pydict()['''int_list''']
assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
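# Illustrative sketch (not part of the original tests): the `converters` kwarg
# exercised above is forwarded to pandas.read_csv, so the same parsing can be
# reproduced with pandas alone (the path below is hypothetical):
#
#   import pandas as pd
#   df = pd.read_csv(
#       "csv_with_int_list.csv",
#       converters={"int_list": lambda x: [int(i) for i in x.split()]},
#   )
#   df["int_list"].tolist()  # -> [[1, 2, 3], [4, 5, 6], [7, 8, 9]]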
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
a ={
"""configuration_lxmert""": ["""LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LxmertConfig"""],
"""tokenization_lxmert""": ["""LxmertTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a =["""LxmertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a =[
"""LxmertEncoder""",
"""LxmertForPreTraining""",
"""LxmertForQuestionAnswering""",
"""LxmertModel""",
"""LxmertPreTrainedModel""",
"""LxmertVisualFeatureEncoder""",
"""LxmertXLayer""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a =[
"""TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFLxmertForPreTraining""",
"""TFLxmertMainLayer""",
"""TFLxmertModel""",
"""TFLxmertPreTrainedModel""",
"""TFLxmertVisualFeatureEncoder""",
]
if TYPE_CHECKING:
from .configuration_lxmert import LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, LxmertConfig
from .tokenization_lxmert import LxmertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_lxmert_fast import LxmertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lxmert import (
LxmertEncoder,
LxmertForPreTraining,
LxmertForQuestionAnswering,
LxmertModel,
LxmertPreTrainedModel,
LxmertVisualFeatureEncoder,
LxmertXLayer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_lxmert import (
TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLxmertForPreTraining,
TFLxmertMainLayer,
TFLxmertModel,
TFLxmertPreTrainedModel,
TFLxmertVisualFeatureEncoder,
)
else:
import sys
a =_LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
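# Minimal sketch (not part of the original file) of the lazy-import pattern
# above: each public name is resolved to its submodule only on first attribute
# access. This is a simplified stand-in, not transformers' actual _LazyModule.
import importlib
import types


class _TinyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # invert {submodule: [attributes]} into {attribute: submodule}
        self._attr_to_submodule = {
            attr: submodule for submodule, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        if attr not in self._attr_to_submodule:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        submodule = importlib.import_module("." + self._attr_to_submodule[attr], self.__name__)
        return getattr(submodule, attr)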
# This is the module that test_patching.py uses to test patch_submodule()
import os # noqa: this is just for tests
import os as renamed_os # noqa: this is just for tests
from os import path # noqa: this is just for tests
from os import path as renamed_path # noqa: this is just for tests
from os.path import join # noqa: this is just for tests
from os.path import join as renamed_join # noqa: this is just for tests
a =open # noqa: we just need to have a builtin inside this module to test it properly
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase__ : Optional[int] = logging.get_logger(__name__)
UpperCamelCase__ : List[str] = {}
class _UpperCamelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
_A : Any = "llama"
_A : Optional[int] = ["past_key_values"]
def __init__( self : Any , lowerCAmelCase__ : int=3_2_0_0_0 , lowerCAmelCase__ : List[Any]=4_0_9_6 , lowerCAmelCase__ : Any=1_1_0_0_8 , lowerCAmelCase__ : Tuple=3_2 , lowerCAmelCase__ : str=3_2 , lowerCAmelCase__ : Union[str, Any]=None , lowerCAmelCase__ : Optional[int]="silu" , lowerCAmelCase__ : List[str]=2_0_4_8 , lowerCAmelCase__ : List[Any]=0.02 , lowerCAmelCase__ : List[Any]=1E-6 , lowerCAmelCase__ : int=True , lowerCAmelCase__ : Dict=0 , lowerCAmelCase__ : Optional[int]=1 , lowerCAmelCase__ : Any=2 , lowerCAmelCase__ : List[str]=1 , lowerCAmelCase__ : List[str]=False , lowerCAmelCase__ : Any=None , **lowerCAmelCase__ : Any , ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : List[Any] = vocab_size
__SCREAMING_SNAKE_CASE : List[str] = max_position_embeddings
__SCREAMING_SNAKE_CASE : Dict = hidden_size
__SCREAMING_SNAKE_CASE : Tuple = intermediate_size
__SCREAMING_SNAKE_CASE : Union[str, Any] = num_hidden_layers
__SCREAMING_SNAKE_CASE : Optional[Any] = num_attention_heads
# for backward compatibility
if num_key_value_heads is None:
__SCREAMING_SNAKE_CASE : Tuple = num_attention_heads
__SCREAMING_SNAKE_CASE : Optional[Any] = num_key_value_heads
__SCREAMING_SNAKE_CASE : Optional[Any] = hidden_act
__SCREAMING_SNAKE_CASE : Optional[Any] = initializer_range
__SCREAMING_SNAKE_CASE : int = rms_norm_eps
__SCREAMING_SNAKE_CASE : Any = pretraining_tp
__SCREAMING_SNAKE_CASE : int = use_cache
__SCREAMING_SNAKE_CASE : List[Any] = rope_scaling
self._rope_scaling_validation()
super().__init__(
pad_token_id=__UpperCAmelCase , bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase , tie_word_embeddings=__UpperCAmelCase , **__UpperCAmelCase , )
def UpperCamelCase__ ( self : Optional[Any] ):
"""simple docstring"""
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling , __UpperCAmelCase ) or len(self.rope_scaling ) != 2:
raise ValueError(
"""`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, """
F"got {self.rope_scaling}" )
__SCREAMING_SNAKE_CASE : Optional[int] = self.rope_scaling.get("""type""" , __UpperCAmelCase )
__SCREAMING_SNAKE_CASE : Any = self.rope_scaling.get("""factor""" , __UpperCAmelCase )
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
F"`rope_scaling`\'s name field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}" )
if rope_scaling_factor is None or not isinstance(__UpperCAmelCase , __UpperCAmelCase ) or rope_scaling_factor <= 1.0:
raise ValueError(F"`rope_scaling`\'s factor field must be an float > 1, got {rope_scaling_factor}" )
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_UpperCamelCase = {
'''configuration_timesformer''': ['''TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TimesformerConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase = [
'''TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TimesformerModel''',
'''TimesformerForVideoClassification''',
'''TimesformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timesformer import (
TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimesformerForVideoClassification,
TimesformerModel,
TimesformerPreTrainedModel,
)
else:
import sys
_UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
# HF Trainer benchmarking tool
#
# This tool can be used to run and compare multiple dimensions of the HF Trainers args.
#
# It then prints a report once in github format with all the information that needs to be shared
# with others and second time in a console-friendly format, so it's easier to use for tuning things up.
#
# The main idea is:
#
# ./trainer-benchmark.py --base-cmd '<cmd args that don't change>' \
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' \
# --target-metric-key train_samples_per_second
#
# The variations can be any command line argument that you want to compare and not just dtype as in
# the example.
#
# --variations allows you to compare variations in multiple dimensions.
#
# as the first dimension has 2 options and the second 3 in our example, this will run the trainer 6
# times adding one of:
#
# 1. --tf32 0 --fp16 0
# 2. --tf32 0 --fp16 1
# 3. --tf32 0 --bf16 1
# 4. --tf32 1 --fp16 0
# 5. --tf32 1 --fp16 1
# 6. --tf32 1 --bf16 1
#
# and print the results. This is just a cartesian product - and more than 2 dimensions can be used.
#
# If you want to rely on defaults, this:
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1'
# is identical to this:
# --variations '--tf32 0|--tf32 1' '|--fp16|--bf16'
#
# the leading empty variation in the 2nd dimension is a valid variation.
#
# So here we get the following 6 variations:
#
# 1. --tf32 0
# 2. --tf32 0 --fp16
# 3. --tf32 0 --bf16
# 4. --tf32 1
# 5. --tf32 1 --fp16
# 6. --tf32 1 --bf16
#
# In this particular case we don't know what the default tf32 setting is (as it's normally
# pytorch-version dependent). That's why it's best to do an explicit setting of each variation:
# `--tf32 0|--tf32 1`
#
# Here is a full example of a train:
#
# CUDA_VISIBLE_DEVICES=0 python ./scripts/benchmark/trainer-benchmark.py \
# --base-cmd \
# ' examples/pytorch/translation/run_translation.py --model_name_or_path t5-small \
# --output_dir output_dir --do_train --label_smoothing 0.1 --logging_strategy no \
# --save_strategy no --per_device_train_batch_size 32 --max_source_length 512 \
# --max_target_length 512 --num_train_epochs 1 --overwrite_output_dir \
# --source_lang en --target_lang ro --dataset_name wmt16 --dataset_config "ro-en" \
# --source_prefix "translate English to Romanian: " --warmup_steps 50 \
# --max_train_samples 20000 --dataloader_num_workers 2 ' \
# --target-metric-key train_samples_per_second --repeat-times 1 --variations \
# '|--fp16|--bf16' '--tf32 0|--tf32 1' --report-metric-keys train_loss \
# --repeat-times 1 --base-variation '--tf32 0'
#
# and here is a possible output:
#
#
# | Variation | Train | Diff | Train |
# | | samples | % | loss |
# | | per | | |
# | | second | | |
# |:----------------|----------:|-------:|--------:|
# | --tf32 0 | 285.11 | 0 | 2.51 |
# | --tf32 1 | 342.09 | 20 | 2.51 |
# | --fp16 --tf32 0 | 423.49 | 49 | 2.51 |
# | --fp16 --tf32 1 | 423.13 | 48 | 2.51 |
# | --bf16 --tf32 0 | 416.80 | 46 | 2.52 |
# | --bf16 --tf32 1 | 415.87 | 46 | 2.52 |
#
#
# So you can quickly compare the different outcomes.
#
# Typically running each experiment once is enough, but if the environment is unstable you can
# re-run each multiple times, e.g., 3 using --repeat-times 3 and it will report the averaged results.
#
# By default it'll use the lowest result as the base line to use as 100% and then compare the rest to
# it as can be seen from the table above, but you can also specify which combination is the one to use as
# the baseline, e.g., to change to another entry use: --base-variation '--tf32 1 --fp16 0'
#
# --target-metric-key is there to tell the program which metrics to compare - the different metric keys are
# inside output_dir/all_results.json. e.g., to measure eval performance instead of train use:
# --target-metric-key eval_samples_per_second
# but of course you will need to adjust the --base-cmd value in the example to perform evaluation as
# well (as currently it doesn't)
#
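# As a sketch (not part of the original tool), the expansion of the two example
# dimensions above can be reproduced with itertools.product:
#
#   dims = [["--tf32 0", "--tf32 1"], ["", "--fp16", "--bf16"]]
#   variations = [" ".join(filter(None, c)) for c in itertools.product(*dims)]
#   # -> ['--tf32 0', '--tf32 0 --fp16', '--tf32 0 --bf16',
#   #     '--tf32 1', '--tf32 1 --fp16', '--tf32 1 --bf16']
#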
import argparse
import datetime
import io
import itertools
import json
import math
import os
import platform
import re
import shlex
import subprocess
import sys
from pathlib import Path
from statistics import fmean
import pandas as pd
import torch
from tqdm import tqdm
import transformers
__UpperCAmelCase = float("""nan""")
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __init__( self : int , lowerCAmelCase : Optional[Any] ) -> Any:
"""simple docstring"""
__lowerCAmelCase : Dict = sys.stdout
__lowerCAmelCase : List[str] = open(lowerCAmelCase , """a""" )
def __getattr__( self : List[Any] , lowerCAmelCase : str ) -> Optional[int]:
"""simple docstring"""
return getattr(self.stdout , lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Any , lowerCAmelCase : Tuple ) -> Optional[int]:
"""simple docstring"""
self.stdout.write(lowerCAmelCase )
# strip tqdm codes
self.file.write(re.sub(r"""^.*\r""" , """""" , lowerCAmelCase , 0 , re.M ) )
def snake_case_ (__A : Dict=8_0 , __A : Dict=False ):
__lowerCAmelCase : Dict = []
# deal with critical env vars
__lowerCAmelCase : Optional[Any] = ["""CUDA_VISIBLE_DEVICES"""]
for key in env_keys:
__lowerCAmelCase : str = os.environ.get(lowercase_ , lowercase_ )
if val is not None:
cmd.append(f'''{key}={val}''' )
# python executable (not always needed if the script is executable)
__lowerCAmelCase : Optional[int] = sys.executable if full_python_path else sys.executable.split("""/""" )[-1]
cmd.append(lowercase_ )
# now the normal args
cmd += list(map(shlex.quote , sys.argv ) )
# split up into up to MAX_WIDTH lines with shell multi-line escapes
__lowerCAmelCase : Optional[Any] = []
__lowerCAmelCase : str = """"""
while len(lowercase_ ) > 0:
current_line += f'''{cmd.pop(0 )} '''
if len(lowercase_ ) == 0 or len(lowercase_ ) + len(cmd[0] ) + 1 > max_width - 1:
lines.append(lowercase_ )
__lowerCAmelCase : Any = """"""
return "\\\n".join(lowercase_ )
def snake_case_ (__A : Any , __A : Any ):
__lowerCAmelCase : Tuple = re.sub(r"""[\\\n]+""" , """ """ , args.base_cmd )
# remove --output_dir if any and set our own
__lowerCAmelCase : Tuple = re.sub("""--output_dir\s+[^\s]+""" , """""" , args.base_cmd )
args.base_cmd += f''' --output_dir {output_dir}'''
# ensure we have --overwrite_output_dir
__lowerCAmelCase : Dict = re.sub("""--overwrite_output_dir\s+""" , """""" , args.base_cmd )
args.base_cmd += " --overwrite_output_dir"
return [sys.executable] + shlex.split(args.base_cmd )
def snake_case_ (__A : Dict , __A : Optional[Any] , __A : Union[str, Any] , __A : Dict , __A : int , __A : List[str] , __A : Union[str, Any] ):
if 0:
import random
from time import sleep
sleep(0 )
return dict(
{k: random.uniform(0 , 1_0_0 ) for k in metric_keys} , **{target_metric_key: random.choice([nan, 10.31, 1_0_0.2, 55.6666, 2_2_2.2_2_2_2_2_2_2_2] )} , )
__lowerCAmelCase : Optional[Any] = subprocess.run(lowercase_ , capture_output=lowercase_ , text=lowercase_ )
if verbose:
print("""STDOUT""" , result.stdout )
print("""STDERR""" , result.stderr )
# save the streams
__lowerCAmelCase : Optional[Any] = variation.replace(""" """ , """-""" )
with open(Path(lowercase_ ) / f'''log.{prefix}.stdout.txt''' , """w""" ) as f:
f.write(result.stdout )
with open(Path(lowercase_ ) / f'''log.{prefix}.stderr.txt''' , """w""" ) as f:
f.write(result.stderr )
if result.returncode != 0:
if verbose:
print("""failed""" )
return {target_metric_key: nan}
with io.open(f'''{output_dir}/all_results.json''' , """r""" , encoding="""utf-8""" ) as f:
__lowerCAmelCase : Tuple = json.load(lowercase_ )
# filter out just the keys we want
return {k: v for k, v in metrics.items() if k in metric_keys}
def process_run(
    id,
    cmd,
    variation_key,
    variation,
    longest_variation_len,
    target_metric_key,
    report_metric_keys,
    repeat_times,
    output_dir,
    verbose,
):
    results = []
    metrics = []
    preamble = f"{id}: {variation:<{longest_variation_len}}"
    outcome = f"{preamble}: "
    metric_keys = set(report_metric_keys + [target_metric_key])
    for i in tqdm(range(repeat_times), desc=preamble, leave=False):
        single_run_metrics = process_run_single(
            id, cmd, variation, output_dir, target_metric_key, metric_keys, verbose
        )
        result = single_run_metrics[target_metric_key]
        if not math.isnan(result):
            metrics.append(single_run_metrics)
            results.append(result)
            outcome += "✓"
        else:
            outcome += "✘"
    outcome = f"\33[2K\r{outcome}"
    if len(metrics) > 0:
        mean_metrics = {k: fmean([x[k] for x in metrics]) for k in metrics[0].keys()}
        mean_target = round(mean_metrics[target_metric_key], 2)
        results_str = f"{outcome} {mean_target}"
        if len(metrics) > 1:
            results_str += f" {tuple(round(x, 2) for x in results)}"
        print(results_str)
        mean_metrics[variation_key] = variation
        return mean_metrics
    else:
        print(outcome)
        return {variation_key: variation, target_metric_key: nan}
def get_versions():
    properties = torch.cuda.get_device_properties(torch.device("cuda"))
    return f"""
Datetime    : {datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")}

Software:
transformers: {transformers.__version__}
torch       : {torch.__version__}
cuda        : {torch.version.cuda}
python      : {platform.python_version()}

Hardware:
{torch.cuda.device_count()} GPUs      : {properties.name}, {properties.total_memory / 2**30:0.2f}GB
"""
def process_results(results, target_metric_key, report_metric_keys, base_variation, output_dir):
    df = pd.DataFrame(results)
    variation_key = "variation"
    diff_key = "diff_%"

    sentinel_value = nan
    if base_variation is not None and len(df[df[variation_key] == base_variation]):
        # this may still return nan
        sentinel_value = df.loc[df[variation_key] == base_variation][target_metric_key].item()
    if math.isnan(sentinel_value):
        # as a fallback, use the minimal value as the sentinel
        sentinel_value = df.loc[df[target_metric_key] != nan][target_metric_key].min()

    # create diff column if possible
    if not math.isnan(sentinel_value):
        df[diff_key] = df.apply(
            lambda r: round(100 * (r[target_metric_key] - sentinel_value) / sentinel_value)
            if not math.isnan(r[target_metric_key])
            else 0,
            axis="columns",
        )

    # re-order columns
    cols = [variation_key, target_metric_key, diff_key, *report_metric_keys]
    df = df.reindex(cols, axis="columns")  # reorder cols

    # capitalize
    df = df.rename(str.capitalize, axis="columns")

    # make the cols as narrow as possible
    df_github = df.rename(lambda c: c.replace("_", "<br>"), axis="columns")
    df_console = df.rename(lambda c: c.replace("_", "\n"), axis="columns")

    report = ["", "Copy between the cut-here-lines and paste as is to github or a forum"]
    report += ["----------8<-----------------8<--------"]
    report += ["*** Results:", df_github.to_markdown(index=False, floatfmt=".2f")]
    report += ["```"]
    report += ["*** Setup:", get_versions()]
    report += ["*** The benchmark command line was:", get_original_command()]
    report += ["```"]
    report += ["----------8<-----------------8<--------"]
    report += ["*** Results (console):", df_console.to_markdown(index=False, floatfmt=".2f")]

    print("\n\n".join(report))
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--base-cmd", default=None, type=str, required=True, help="Base cmd")
    parser.add_argument(
        "--variations",
        default=None,
        type=str,
        nargs="+",
        required=True,
        help="Multi-dimensional variations, example: '|--fp16|--bf16' '|--tf32'",
    )
    parser.add_argument(
        "--base-variation",
        default=None,
        type=str,
        help="Baseline variation to compare to. if None the minimal target value will be used to compare against",
    )
    parser.add_argument(
        "--target-metric-key",
        default=None,
        type=str,
        required=True,
        help="Target metric key in output_dir/all_results.json, e.g., train_samples_per_second",
    )
    parser.add_argument(
        "--report-metric-keys",
        default="",
        type=str,
        help="Report metric keys - other metric keys from output_dir/all_results.json to report, e.g., train_loss. Use a single argument e.g., 'train_loss train_samples'",
    )
    parser.add_argument(
        "--repeat-times",
        default=1,
        type=int,
        help="How many times to re-run each variation - an average will be reported",
    )
    parser.add_argument(
        "--output_dir",
        default="output_benchmark",
        type=str,
        help="The output directory where all the benchmark reports will go to and additionally this directory will be used to override --output_dir in the script that is being benchmarked",
    )
    parser.add_argument(
        "--verbose",
        default=False,
        action="store_true",
        help="Whether to show the outputs of each run or just the benchmark progress",
    )
    args = parser.parse_args()

    output_dir = args.output_dir
    Path(output_dir).mkdir(exist_ok=True)
    base_cmd = get_base_command(args, output_dir)

    # split each dimension into its --foo variations
    dims = [list(map(str.strip, re.split(r"\|", x))) for x in args.variations]
    # build a cartesian product of dimensions and convert those back into cmd-line arg strings,
    # while stripping white space for inputs that were empty
    variations = list(map(str.strip, map(" ".join, itertools.product(*dims))))
    longest_variation_len = max(len(x) for x in variations)

    # split wanted keys
    report_metric_keys = args.report_metric_keys.split()

    # capture prints into a log file for convenience
    report_fn = f"benchmark-report-{datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S')}.txt"
    print(f"\nNote: each run's output is also logged under {output_dir}/log.*.std*.txt")
    print(f"and this script's output is also piped into {report_fn}")

    sys.stdout = Tee(report_fn)

    print(f"\n*** Running {len(variations)} benchmarks:")
    print(f"Base command: {' '.join(base_cmd)}")

    variation_key = "variation"
    results = []
    for id, variation in enumerate(tqdm(variations, desc="Total completion: ", leave=False)):
        cmd = base_cmd + variation.split()
        results.append(
            process_run(
                id + 1,
                cmd,
                variation_key,
                variation,
                longest_variation_len,
                args.target_metric_key,
                report_metric_keys,
                args.repeat_times,
                output_dir,
                args.verbose,
            )
        )

    process_results(results, args.target_metric_key, report_metric_keys, args.base_variation, output_dir)


if __name__ == "__main__":
    main()
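# Example invocation (hypothetical file name and training command, shown purely
# for illustration; any script that writes output_dir/all_results.json works):
#
#   CUDA_VISIBLE_DEVICES=0 python trainer-benchmark.py \
#       --base-cmd "python examples/pytorch/translation/run_translation.py ..." \
#       --variations '|--fp16|--bf16' '|--tf32' \
#       --target-metric-key train_samples_per_second \
#       --report-metric-keys train_loss \
#       --repeat-times 2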
import numpy as np
import qiskit
def bb84(key_len: int = 8, seed: int | None = None) -> str:
    """Simulate the BB84 quantum key distribution protocol and return a key of `key_len` bits."""
    # Set up the random number generator.
    rng = np.random.default_rng(seed=seed)

    # Roughly 25% of the qubits will contribute to the key.
    # So we take more than we need.
    num_qubits = 6 * key_len
    # Measurement basis for Alice's qubits.
    alice_basis = rng.integers(2, size=num_qubits)
    # The set of states Alice will prepare.
    alice_state = rng.integers(2, size=num_qubits)
    # Measurement basis for Bob's qubits.
    bob_basis = rng.integers(2, size=num_qubits)

    # Quantum Circuit to simulate BB84
    bb84_circ = qiskit.QuantumCircuit(num_qubits, name="BB84")

    # Alice prepares her qubits according to rules above.
    for index, _ in enumerate(alice_basis):
        if alice_state[index] == 1:
            bb84_circ.x(index)
        if alice_basis[index] == 1:
            bb84_circ.h(index)
    bb84_circ.barrier()

    # Bob measures the received qubits according to rules above.
    for index, _ in enumerate(bob_basis):
        if bob_basis[index] == 1:
            bb84_circ.h(index)

    bb84_circ.barrier()
    bb84_circ.measure_all()

    # Simulate the quantum circuit.
    sim = qiskit.Aer.get_backend("aer_simulator")
    # We only need to run one shot because the key is unique.
    # Multiple shots will produce the same key.
    job = qiskit.execute(bb84_circ, sim, shots=1, seed_simulator=seed)
    # Returns the result of measurement.
    result = job.result().get_counts(bb84_circ).most_frequent()

    # Extracting the generated key from the simulation results.
    # Only keep measurement results where Alice and Bob chose the same basis.
    gen_key = "".join(
        [
            result_bit
            for alice_basis_bit, bob_basis_bit, result_bit in zip(alice_basis, bob_basis, result)
            if alice_basis_bit == bob_basis_bit
        ]
    )

    # Get final key. Pad with 0 if too short, otherwise truncate.
    key = gen_key[:key_len] if len(gen_key) >= key_len else gen_key.ljust(key_len, "0")
    return key


if __name__ == "__main__":
    print(f"The generated key is : {bb84(8, seed=0)}")
    from doctest import testmod

    testmod()
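# Hedged sanity checks: whatever basis choices the RNG makes, the sifting and
# padding above always yield a key of exactly `key_len` bits drawn from {0, 1}.
if __name__ == "__main__":
    assert len(bb84(16, seed=0)) == 16
    assert set(bb84(8, seed=1)) <= {"0", "1"}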
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
__UpperCamelCase : Any = logging.get_logger(__name__)
def __SCREAMING_SNAKE_CASE ( A_ ):
if isinstance(A_ , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ):
return videos
elif isinstance(A_ , (list, tuple) ) and is_valid_image(videos[0] ):
return [videos]
elif is_valid_image(A_ ):
return [[videos]]
raise ValueError(f'Could not make batched video from {videos}' )
class SCREAMING_SNAKE_CASE ( a_ ):
"""simple docstring"""
lowercase__ = ["pixel_values"]
def __init__( self : Tuple ,lowercase_ : bool = True ,lowercase_ : Dict[str, int] = None ,lowercase_ : PILImageResampling = PILImageResampling.BILINEAR ,lowercase_ : bool = True ,lowercase_ : Dict[str, int] = None ,lowercase_ : bool = True ,lowercase_ : Union[int, float] = 1 / 2_5_5 ,lowercase_ : bool = True ,lowercase_ : bool = True ,lowercase_ : Optional[Union[float, List[float]]] = None ,lowercase_ : Optional[Union[float, List[float]]] = None ,**lowercase_ : str ,):
super().__init__(**lowercase_ )
lowerCAmelCase__ : Union[str, Any] = size if size is not None else {'''shortest_edge''': 2_5_6}
lowerCAmelCase__ : Any = get_size_dict(lowercase_ ,default_to_square=lowercase_ )
lowerCAmelCase__ : Dict = crop_size if crop_size is not None else {'''height''': 2_2_4, '''width''': 2_2_4}
lowerCAmelCase__ : List[str] = get_size_dict(lowercase_ ,param_name='''crop_size''' )
lowerCAmelCase__ : Union[str, Any] = do_resize
lowerCAmelCase__ : Optional[Any] = size
lowerCAmelCase__ : Any = do_center_crop
lowerCAmelCase__ : int = crop_size
lowerCAmelCase__ : List[Any] = resample
lowerCAmelCase__ : Tuple = do_rescale
lowerCAmelCase__ : List[Any] = rescale_factor
lowerCAmelCase__ : str = offset
lowerCAmelCase__ : Any = do_normalize
lowerCAmelCase__ : Tuple = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
lowerCAmelCase__ : Optional[Any] = image_std if image_std is not None else IMAGENET_STANDARD_STD
def __lowerCAmelCase ( self : Tuple ,lowercase_ : np.ndarray ,lowercase_ : Dict[str, int] ,lowercase_ : PILImageResampling = PILImageResampling.BILINEAR ,lowercase_ : Optional[Union[str, ChannelDimension]] = None ,**lowercase_ : Optional[Any] ,):
lowerCAmelCase__ : Any = get_size_dict(lowercase_ ,default_to_square=lowercase_ )
if "shortest_edge" in size:
lowerCAmelCase__ : Any = get_resize_output_image_size(lowercase_ ,size['''shortest_edge'''] ,default_to_square=lowercase_ )
elif "height" in size and "width" in size:
lowerCAmelCase__ : Dict = (size['''height'''], size['''width'''])
else:
raise ValueError(F'Size must have \'height\' and \'width\' or \'shortest_edge\' as keys. Got {size.keys()}' )
return resize(lowercase_ ,size=lowercase_ ,resample=lowercase_ ,data_format=lowercase_ ,**lowercase_ )
def __lowerCAmelCase ( self : Optional[int] ,lowercase_ : np.ndarray ,lowercase_ : Dict[str, int] ,lowercase_ : Optional[Union[str, ChannelDimension]] = None ,**lowercase_ : Dict ,):
lowerCAmelCase__ : List[Any] = get_size_dict(lowercase_ )
if "height" not in size or "width" not in size:
raise ValueError(F'Size must have \'height\' and \'width\' as keys. Got {size.keys()}' )
return center_crop(lowercase_ ,size=(size['''height'''], size['''width''']) ,data_format=lowercase_ ,**lowercase_ )
def __lowerCAmelCase ( self : int ,lowercase_ : np.ndarray ,lowercase_ : Union[int, float] ,lowercase_ : bool = True ,lowercase_ : Optional[Union[str, ChannelDimension]] = None ,**lowercase_ : Dict ,):
lowerCAmelCase__ : int = image.astype(np.floataa )
if offset:
lowerCAmelCase__ : Tuple = image - (scale / 2)
return rescale(lowercase_ ,scale=lowercase_ ,data_format=lowercase_ ,**lowercase_ )
def __lowerCAmelCase ( self : Any ,lowercase_ : np.ndarray ,lowercase_ : Union[float, List[float]] ,lowercase_ : Union[float, List[float]] ,lowercase_ : Optional[Union[str, ChannelDimension]] = None ,**lowercase_ : Dict ,):
return normalize(lowercase_ ,mean=lowercase_ ,std=lowercase_ ,data_format=lowercase_ ,**lowercase_ )
def __lowerCAmelCase ( self : Tuple ,lowercase_ : ImageInput ,lowercase_ : bool = None ,lowercase_ : Dict[str, int] = None ,lowercase_ : PILImageResampling = None ,lowercase_ : bool = None ,lowercase_ : Dict[str, int] = None ,lowercase_ : bool = None ,lowercase_ : float = None ,lowercase_ : bool = None ,lowercase_ : bool = None ,lowercase_ : Optional[Union[float, List[float]]] = None ,lowercase_ : Optional[Union[float, List[float]]] = None ,lowercase_ : Optional[ChannelDimension] = ChannelDimension.FIRST ,):
if do_resize and size is None or resample is None:
raise ValueError('''Size and resample must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
if offset and not do_rescale:
raise ValueError('''For offset, do_rescale must also be set to True.''' )
# All transformations expect numpy arrays.
lowerCAmelCase__ : List[Any] = to_numpy_array(lowercase_ )
if do_resize:
lowerCAmelCase__ : Optional[int] = self.resize(image=lowercase_ ,size=lowercase_ ,resample=lowercase_ )
if do_center_crop:
lowerCAmelCase__ : Union[str, Any] = self.center_crop(lowercase_ ,size=lowercase_ )
if do_rescale:
lowerCAmelCase__ : Any = self.rescale(image=lowercase_ ,scale=lowercase_ ,offset=lowercase_ )
if do_normalize:
lowerCAmelCase__ : List[str] = self.normalize(image=lowercase_ ,mean=lowercase_ ,std=lowercase_ )
lowerCAmelCase__ : Optional[Any] = to_channel_dimension_format(lowercase_ ,lowercase_ )
return image
def __lowerCAmelCase ( self : Tuple ,lowercase_ : ImageInput ,lowercase_ : bool = None ,lowercase_ : Dict[str, int] = None ,lowercase_ : PILImageResampling = None ,lowercase_ : bool = None ,lowercase_ : Dict[str, int] = None ,lowercase_ : bool = None ,lowercase_ : float = None ,lowercase_ : bool = None ,lowercase_ : bool = None ,lowercase_ : Optional[Union[float, List[float]]] = None ,lowercase_ : Optional[Union[float, List[float]]] = None ,lowercase_ : Optional[Union[str, TensorType]] = None ,lowercase_ : ChannelDimension = ChannelDimension.FIRST ,**lowercase_ : Optional[int] ,):
lowerCAmelCase__ : int = do_resize if do_resize is not None else self.do_resize
lowerCAmelCase__ : str = resample if resample is not None else self.resample
lowerCAmelCase__ : Optional[int] = do_center_crop if do_center_crop is not None else self.do_center_crop
lowerCAmelCase__ : Optional[Any] = do_rescale if do_rescale is not None else self.do_rescale
lowerCAmelCase__ : List[str] = rescale_factor if rescale_factor is not None else self.rescale_factor
lowerCAmelCase__ : str = offset if offset is not None else self.offset
lowerCAmelCase__ : Tuple = do_normalize if do_normalize is not None else self.do_normalize
lowerCAmelCase__ : int = image_mean if image_mean is not None else self.image_mean
lowerCAmelCase__ : Optional[Any] = image_std if image_std is not None else self.image_std
lowerCAmelCase__ : Optional[int] = size if size is not None else self.size
lowerCAmelCase__ : Optional[int] = get_size_dict(lowercase_ ,default_to_square=lowercase_ )
lowerCAmelCase__ : Any = crop_size if crop_size is not None else self.crop_size
lowerCAmelCase__ : Dict = get_size_dict(lowercase_ ,param_name='''crop_size''' )
if not valid_images(lowercase_ ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
lowerCAmelCase__ : Tuple = make_batched(lowercase_ )
lowerCAmelCase__ : Optional[int] = [
[
self._preprocess_image(
image=lowercase_ ,do_resize=lowercase_ ,size=lowercase_ ,resample=lowercase_ ,do_center_crop=lowercase_ ,crop_size=lowercase_ ,do_rescale=lowercase_ ,rescale_factor=lowercase_ ,offset=lowercase_ ,do_normalize=lowercase_ ,image_mean=lowercase_ ,image_std=lowercase_ ,data_format=lowercase_ ,)
for img in video
]
for video in videos
]
lowerCAmelCase__ : Dict = {'''pixel_values''': videos}
return BatchFeature(data=lowercase_ ,tensor_type=lowercase_ )
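# A minimal usage sketch. The class name restored above is an assumption based on
# this file's structure (the `offset` rescale and `make_batched` helper match the
# ViViT video processor); any list of per-frame numpy arrays is valid input.
#
# import numpy as np
# processor = VivitImageProcessor()
# video = [np.random.randint(0, 256, (360, 640, 3), dtype=np.uint8) for _ in range(16)]
# batch = processor(video, return_tensors="np")
# print(batch["pixel_values"].shape)  # expected: (1, 16, 3, 224, 224)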
def solution(n: int = 4_000_000) -> int:
    """Return the sum of the even-valued Fibonacci terms that do not exceed n."""
    fib = [0, 1]
    i = 0
    while fib[i] <= n:
        fib.append(fib[i] + fib[i + 1])
        if fib[i + 2] > n:
            break
        i += 1
    total = 0
    for j in range(len(fib) - 1):
        if fib[j] % 2 == 0:
            total += fib[j]
    return total


if __name__ == "__main__":
    print(f"{solution() = }")
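# Hedged check: this is Project Euler problem 2, whose published answer for a
# four-million cap is 4613732.
if __name__ == "__main__":
    assert solution(4_000_000) == 4_613_732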
class RadixNode:
    def __init__(self, prefix: str = "", is_leaf: bool = False):
        # Mapping from the first character of each child's prefix to the child node
        self.nodes: dict[str, RadixNode] = {}

        # A node will be a leaf if the tree contains its word
        self.is_leaf = is_leaf
        self.prefix = prefix

    def match(self, word: str) -> tuple[str, str, str]:
        """Return (matching prefix, remaining node prefix, remaining word)."""
        x = 0
        for q, w in zip(self.prefix, word):
            if q != w:
                break
            x += 1
        return self.prefix[:x], self.prefix[x:], word[x:]

    def insert_many(self, words: list[str]) -> None:
        for word in words:
            self.insert(word)

    def insert(self, word: str) -> None:
        # Case 1: The word is the prefix of the node
        # Solution: We set the current node as leaf
        if self.prefix == word:
            self.is_leaf = True

        # Case 2: The node has no edges that have a prefix to the word
        # Solution: We create an edge from the current node to a new one
        # containing the word
        elif word[0] not in self.nodes:
            self.nodes[word[0]] = RadixNode(prefix=word, is_leaf=True)

        else:
            incoming_node = self.nodes[word[0]]
            matching_string, remaining_prefix, remaining_word = incoming_node.match(word)

            # Case 3: The node prefix is equal to the matching
            # Solution: We insert remaining word on the next node
            if remaining_prefix == "":
                self.nodes[matching_string[0]].insert(remaining_word)

            # Case 4: The word is greater equal to the matching
            # Solution: Create a node in between both nodes, change
            # prefixes and add the new node for the remaining word
            else:
                incoming_node.prefix = remaining_prefix

                aux_node = self.nodes[matching_string[0]]
                self.nodes[matching_string[0]] = RadixNode(matching_string, False)
                self.nodes[matching_string[0]].nodes[remaining_prefix[0]] = aux_node

                if remaining_word == "":
                    self.nodes[matching_string[0]].is_leaf = True
                else:
                    self.nodes[matching_string[0]].insert(remaining_word)

    def find(self, word: str) -> bool:
        incoming_node = self.nodes.get(word[0], None)
        if not incoming_node:
            return False
        else:
            matching_string, remaining_prefix, remaining_word = incoming_node.match(word)
            # If there is remaining prefix, the word can't be on the tree
            if remaining_prefix != "":
                return False
            # This applies when the word and the prefix are equal
            elif remaining_word == "":
                return incoming_node.is_leaf
            # We have word remaining so we check the next node
            else:
                return incoming_node.find(remaining_word)

    def delete(self, word: str) -> bool:
        incoming_node = self.nodes.get(word[0], None)
        if not incoming_node:
            return False
        else:
            matching_string, remaining_prefix, remaining_word = incoming_node.match(word)
            # If there is remaining prefix, the word can't be on the tree
            if remaining_prefix != "":
                return False
            # We have word remaining so we check the next node
            elif remaining_word != "":
                return incoming_node.delete(remaining_word)
            else:
                # If it is not a leaf, we don't have to delete
                if not incoming_node.is_leaf:
                    return False
                else:
                    # We delete the node if no edges go from it
                    if len(incoming_node.nodes) == 0:
                        del self.nodes[word[0]]
                        # We merge the current node with its only child
                        if len(self.nodes) == 1 and not self.is_leaf:
                            merging_node = list(self.nodes.values())[0]
                            self.is_leaf = merging_node.is_leaf
                            self.prefix += merging_node.prefix
                            self.nodes = merging_node.nodes
                    # If there is more than 1 edge, we just mark it as non-leaf
                    elif len(incoming_node.nodes) > 1:
                        incoming_node.is_leaf = False
                    # If there is 1 edge, we merge it with its child
                    else:
                        merging_node = list(incoming_node.nodes.values())[0]
                        incoming_node.is_leaf = merging_node.is_leaf
                        incoming_node.prefix += merging_node.prefix
                        incoming_node.nodes = merging_node.nodes

                    return True

    def print_tree(self, height: int = 0) -> None:
        if self.prefix != "":
            print("-" * height, self.prefix, "  (leaf)" if self.is_leaf else "")

        for value in self.nodes.values():
            value.print_tree(height + 1)


def test_trie() -> bool:
    words = "banana bananas bandana band apple all beast".split()
    root = RadixNode()
    root.insert_many(words)

    assert all(root.find(word) for word in words)
    assert not root.find("bandanas")
    assert not root.find("apps")
    root.delete("all")
    assert not root.find("all")
    root.delete("banana")
    assert not root.find("banana")
    assert root.find("bananas")

    return True


def pytests() -> None:
    assert test_trie()


def main() -> None:
    root = RadixNode()
    words = "banana bananas bandanas bandana band apple all beast".split()
    root.insert_many(words)

    print("Words:", words)
    print("Tree:")
    root.print_tree()


if __name__ == "__main__":
    main()
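# A small usage sketch of the RadixNode API above (results follow from the
# insert/find/delete semantics implemented here):
#
# root = RadixNode()
# root.insert_many(["roman", "romane", "romanus"])
# root.find("roman")     # True
# root.find("rom")       # False: "rom" was never inserted as a word
# root.delete("romane")  # True; "roman" and "romanus" remain findable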
speed_chart: dict[str, float] = {
    "km/h": 1.0,
    "m/s": 3.6,
    "mph": 1.609344,
    "knot": 1.852,
}

speed_chart_inverse: dict[str, float] = {
    "km/h": 1.0,
    "m/s": 0.277777778,
    "mph": 0.621371192,
    "knot": 0.539956803,
}


def convert_speed(speed: float, unit_from: str, unit_to: str) -> float:
    if unit_to not in speed_chart or unit_from not in speed_chart_inverse:
        msg = (
            f"Incorrect 'from_type' or 'to_type' value: {unit_from!r}, {unit_to!r}\n"
            f"Valid values are: {', '.join(speed_chart_inverse)}"
        )
        raise ValueError(msg)
    return round(speed * speed_chart[unit_from] * speed_chart_inverse[unit_to], 3)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
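# Worked examples (values follow directly from the charts above; the function
# name `convert_speed` is a restored guess, since nothing else references it):
# convert_speed(100, "km/h", "m/s")  # 100 * 1.0 * 0.277777778 -> 27.778
# convert_speed(100, "mph", "km/h")  # 100 * 1.609344 * 1.0    -> 160.934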
import argparse
import json
from typing import List
from ltp import LTP
from transformers import BertTokenizer
def _is_chinese_char(cp):
    """Check whether cp is the codepoint of a CJK character."""
    if (
        (cp >= 0x4E00 and cp <= 0x9FFF)
        or (cp >= 0x3400 and cp <= 0x4DBF)  #
        or (cp >= 0x20000 and cp <= 0x2A6DF)  #
        or (cp >= 0x2A700 and cp <= 0x2B73F)  #
        or (cp >= 0x2B740 and cp <= 0x2B81F)  #
        or (cp >= 0x2B820 and cp <= 0x2CEAF)  #
        or (cp >= 0xF900 and cp <= 0xFAFF)
        or (cp >= 0x2F800 and cp <= 0x2FA1F)  #
    ):  #
        return True

    return False


def is_chinese(word: str):
    """Return 1 if every character of `word` is a CJK character, else 0."""
    for char in word:
        cp = ord(char)
        if not _is_chinese_char(cp):
            return 0
    return 1


def get_chinese_word(tokens: List[str]):
    word_set = set()

    for token in tokens:
        chinese_word = len(token) > 1 and is_chinese(token)
        if chinese_word:
            word_set.add(token)
    word_list = list(word_set)
    return word_list


def add_sub_symbol(bert_tokens: List[str], chinese_word_set: set):
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w) for w in chinese_word_set])

    bert_word = bert_tokens
    start, end = 0, len(bert_word)
    while start < end:
        single_word = True
        if is_chinese(bert_word[start]):
            l = min(end - start, max_word_len)
            for i in range(l, 1, -1):
                whole_word = "".join(bert_word[start : start + i])
                if whole_word in chinese_word_set:
                    for j in range(start + 1, start + i):
                        bert_word[j] = "##" + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word


def prepare_ref(lines: List[str], ltp_tokenizer: LTP, bert_tokenizer: BertTokenizer):
    ltp_res = []

    for i in range(0, len(lines), 100):
        res = ltp_tokenizer.seg(lines[i : i + 100])[0]
        res = [get_chinese_word(r) for r in res]
        ltp_res.extend(res)
    assert len(ltp_res) == len(lines)

    bert_res = []
    for i in range(0, len(lines), 100):
        res = bert_tokenizer(lines[i : i + 100], add_special_tokens=True, truncation=True, max_length=512)
        bert_res.extend(res["input_ids"])
    assert len(bert_res) == len(lines)

    ref_ids = []
    for input_ids, chinese_word in zip(bert_res, ltp_res):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id)
            input_tokens.append(token)
        input_tokens = add_sub_symbol(input_tokens, chinese_word)
        ref_id = []
        # We only save pos of chinese subwords start with ##, which mean is part of a whole word.
        for i, token in enumerate(input_tokens):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)):
                    ref_id.append(i)
        ref_ids.append(ref_id)

    assert len(ref_ids) == len(bert_res)

    return ref_ids


def main(args):
    with open(args.file_name, "r", encoding="utf-8") as f:
        data = f.readlines()
    data = [line.strip() for line in data if len(line) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    ltp_tokenizer = LTP(args.ltp)  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert)

    ref_ids = prepare_ref(data, ltp_tokenizer, bert_tokenizer)

    with open(args.save_path, "w", encoding="utf-8") as f:
        data = [json.dumps(ref) + "\n" for ref in ref_ids]
        f.writelines(data)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="prepare_chinese_ref")
    parser.add_argument(
        "--file_name",
        type=str,
        default="./resources/chinese-demo.txt",
        help="file need process, same as training data in lm",
    )
    parser.add_argument(
        "--ltp", type=str, default="./resources/ltp", help="resources for LTP tokenizer, usually a path"
    )
    parser.add_argument("--bert", type=str, default="./resources/robert", help="resources for Bert tokenizer")
    parser.add_argument("--save_path", type=str, default="./resources/ref.txt", help="path to save res")

    args = parser.parse_args()
    main(args)
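# Example invocation (the script file name is hypothetical; the paths are this
# script's own argparse defaults and must point at real LTP/BERT resources):
#
#   python prepare_chinese_ref.py \
#       --file_name ./resources/chinese-demo.txt \
#       --ltp ./resources/ltp \
#       --bert ./resources/robert \
#       --save_path ./resources/ref.txt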
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
    },
    "merges_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "allenai/led-base-16384": 16_384,
}
class LEDTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LEDTokenizer
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False

            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True

            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

    @property
    # Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)

        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )

        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)

        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )

        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output

        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _pad(
        self,
        encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
        max_length: Optional[int] = None,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
    ) -> dict:
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs,
            max_length=max_length,
            padding_strategy=padding_strategy,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
        )

        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = "attention_mask" in self.model_input_names

        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs["global_attention_mask"]) != len(required_input)

            if needs_to_be_padded:
                difference = len(required_input) - len(encoded_inputs["global_attention_mask"])

                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs["global_attention_mask"] = (
                        encoded_inputs["global_attention_mask"] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs["global_attention_mask"] = [-1] * difference + encoded_inputs[
                        "global_attention_mask"
                    ]
                else:
                    raise ValueError("Invalid padding strategy:" + str(self.padding_side))

        return encoded_inputs
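# Usage sketch of the `global_attention_mask` handling in `_pad` above (the model
# id is taken from the vocab map in this file; a runtime download is assumed):
#
# tok = LEDTokenizerFast.from_pretrained("allenai/led-base-16384")
# enc = tok("a long document ...")
# enc["global_attention_mask"] = [1] + [0] * (len(enc["input_ids"]) - 1)
# padded = tok.pad(enc, padding="max_length", max_length=32)
# # the global attention mask is extended with -1 (local attention), not 0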
"""simple docstring"""
from collections import OrderedDict
from typing import Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)

PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "deepmind/language-perceiver": "https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json",
    # See all Perceiver models at https://huggingface.co/models?filter=perceiver
}


class PerceiverConfig(PretrainedConfig):
    model_type = "perceiver"

    def __init__(
        self,
        num_latents=256,
        d_latents=1280,
        d_model=768,
        num_blocks=1,
        num_self_attends_per_block=26,
        num_self_attention_heads=8,
        num_cross_attention_heads=8,
        qk_channels=None,
        v_channels=None,
        cross_attention_shape_for_attention="kv",
        self_attention_widening_factor=1,
        cross_attention_widening_factor=1,
        hidden_act="gelu",
        attention_probs_dropout_prob=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_query_residual=True,
        vocab_size=262,
        max_position_embeddings=2048,
        image_size=56,
        train_size=[368, 496],
        num_frames=16,
        audio_samples_per_frame=1920,
        samples_per_patch=16,
        output_shape=[1, 16, 224, 224],
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_latents = num_latents
        self.d_latents = d_latents
        self.d_model = d_model
        self.num_blocks = num_blocks
        self.num_self_attends_per_block = num_self_attends_per_block
        self.num_self_attention_heads = num_self_attention_heads
        self.num_cross_attention_heads = num_cross_attention_heads
        self.qk_channels = qk_channels
        self.v_channels = v_channels
        self.cross_attention_shape_for_attention = cross_attention_shape_for_attention
        self.self_attention_widening_factor = self_attention_widening_factor
        self.cross_attention_widening_factor = cross_attention_widening_factor
        self.hidden_act = hidden_act
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_query_residual = use_query_residual
        # masked language modeling attributes
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        # image classification attributes
        self.image_size = image_size
        # flow attributes
        self.train_size = train_size
        # multimodal autoencoding attributes
        self.num_frames = num_frames
        self.audio_samples_per_frame = audio_samples_per_frame
        self.samples_per_patch = samples_per_patch
        self.output_shape = output_shape


class PerceiverOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("inputs", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    def generate_dummy_inputs(
        self,
        preprocessor: Union[PreTrainedTokenizerBase, FeatureExtractionMixin],
        batch_size: int = -1,
        seq_length: int = -1,
        num_choices: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
    ) -> Mapping[str, Any]:
        # copied from `transformers.onnx.config.OnnxConfig` and slightly altered/simplified
        if isinstance(preprocessor, PreTrainedTokenizerBase):
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(
                batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
            )
            # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
            token_to_add = preprocessor.num_special_tokens_to_add(is_pair)
            seq_length = compute_effective_axis_dimension(
                seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
            )
            # Generate dummy inputs according to compute batch and sequence
            dummy_input = [" ".join(["a"]) * seq_length] * batch_size
            inputs = dict(preprocessor(dummy_input, return_tensors=framework))
            inputs["inputs"] = inputs.pop("input_ids")
            return inputs
        elif isinstance(preprocessor, FeatureExtractionMixin) and preprocessor.model_input_names[0] == "pixel_values":
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
            dummy_input = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
            inputs = dict(preprocessor(images=dummy_input, return_tensors=framework))
            inputs["inputs"] = inputs.pop("pixel_values")
            return inputs
        else:
            raise ValueError(
                "Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor."
            )
"""simple docstring"""
import math
def _SCREAMING_SNAKE_CASE ( _lowercase : int = 100 ) ->int:
'''simple docstring'''
a : Dict = sum(i * i for i in range(1 , n + 1 ) )
a : Tuple = int(math.pow(sum(range(1 , n + 1 ) ) , 2 ) )
return square_of_sum - sum_of_squares
if __name__ == "__main__":
print(F'''{solution() = }''')
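# Worked check: for n = 10 the square of the sum is 55**2 = 3025 and the sum of
# the squares is 385, so the difference is 2640.
if __name__ == "__main__":
    assert solution(10) == 2640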
from datasets.utils.patching import _PatchedModuleObj, patch_submodule
from . import _test_patching
def test_patch_submodule():
    import os as original_os
    from os import path as original_path
    from os import rename as original_rename
    from os.path import dirname as original_dirname
    from os.path import join as original_join

    assert _test_patching.os is original_os
    assert _test_patching.path is original_path
    assert _test_patching.join is original_join

    assert _test_patching.renamed_os is original_os
    assert _test_patching.renamed_path is original_path
    assert _test_patching.renamed_join is original_join

    mock = "__test_patch_submodule_mock__"
    with patch_submodule(_test_patching, "os.path.join", mock):
        # Every way to access os.path.join must be patched, and the rest must stay untouched

        # check os.path.join
        assert isinstance(_test_patching.os, _PatchedModuleObj)
        assert isinstance(_test_patching.os.path, _PatchedModuleObj)
        assert _test_patching.os.path.join is mock

        # check path.join
        assert isinstance(_test_patching.path, _PatchedModuleObj)
        assert _test_patching.path.join is mock

        # check join
        assert _test_patching.join is mock

        # check that the other attributes are untouched
        assert _test_patching.os.rename is original_rename
        assert _test_patching.path.dirname is original_dirname
        assert _test_patching.os.path.dirname is original_dirname

        # Even renamed modules or objects must be patched

        # check renamed_os.path.join
        assert isinstance(_test_patching.renamed_os, _PatchedModuleObj)
        assert isinstance(_test_patching.renamed_os.path, _PatchedModuleObj)
        assert _test_patching.renamed_os.path.join is mock

        # check renamed_path.join
        assert isinstance(_test_patching.renamed_path, _PatchedModuleObj)
        assert _test_patching.renamed_path.join is mock

        # check renamed_join
        assert _test_patching.renamed_join is mock

        # check that the other attributes are untouched
        assert _test_patching.renamed_os.rename is original_rename
        assert _test_patching.renamed_path.dirname is original_dirname
        assert _test_patching.renamed_os.path.dirname is original_dirname

    # check that everthing is back to normal when the patch is over
    assert _test_patching.os is original_os
    assert _test_patching.path is original_path
    assert _test_patching.join is original_join

    assert _test_patching.renamed_os is original_os
    assert _test_patching.renamed_path is original_path
    assert _test_patching.renamed_join is original_join


def test_patch_submodule_builtin():
    assert _test_patching.open is open

    mock = "__test_patch_submodule_builtin_mock__"
    # _test_patching has "open" in its globals
    assert _test_patching.open is open
    with patch_submodule(_test_patching, "open", mock):
        assert _test_patching.open is mock

    # check that everthing is back to normal when the patch is over
    assert _test_patching.open is open


def test_patch_submodule_missing():
    # pandas.read_csv is not present in _test_patching
    mock = "__test_patch_submodule_missing_mock__"
    with patch_submodule(_test_patching, "pandas.read_csv", mock):
        pass


def test_patch_submodule_missing_builtin():
    # builtin should always be mocked even if they're not in the globals
    # in case they're loaded at one point
    mock = "__test_patch_submodule_missing_builtin_mock__"
    # _test_patching doesn't have "len" in its globals
    assert getattr(_test_patching, "len", None) is None
    with patch_submodule(_test_patching, "len", mock):
        assert _test_patching.len is mock
    assert _test_patching.len is len


def test_patch_submodule_start_and_stop():
    mock = "__test_patch_submodule_start_and_stop_mock__"
    patch = patch_submodule(_test_patching, "open", mock)
    assert _test_patching.open is open
    patch.start()
    assert _test_patching.open is mock
    patch.stop()
    assert _test_patching.open is open


def test_patch_submodule_successive():
    from os import rename as original_rename
    from os.path import dirname as original_dirname
    from os.path import join as original_join

    mock_join = "__test_patch_submodule_successive_join__"
    mock_dirname = "__test_patch_submodule_successive_dirname__"
    mock_rename = "__test_patch_submodule_successive_rename__"
    assert _test_patching.os.path.join is original_join
    assert _test_patching.os.path.dirname is original_dirname
    assert _test_patching.os.rename is original_rename

    with patch_submodule(_test_patching, "os.path.join", mock_join):
        with patch_submodule(_test_patching, "os.rename", mock_rename):
            with patch_submodule(_test_patching, "os.path.dirname", mock_dirname):
                assert _test_patching.os.path.join is mock_join
                assert _test_patching.os.path.dirname is mock_dirname
                assert _test_patching.os.rename is mock_rename

    # try another order
    with patch_submodule(_test_patching, "os.rename", mock_rename):
        with patch_submodule(_test_patching, "os.path.join", mock_join):
            with patch_submodule(_test_patching, "os.path.dirname", mock_dirname):
                assert _test_patching.os.path.join is mock_join
                assert _test_patching.os.path.dirname is mock_dirname
                assert _test_patching.os.rename is mock_rename

    assert _test_patching.os.path.join is original_join
    assert _test_patching.os.path.dirname is original_dirname
    assert _test_patching.os.rename is original_rename


def test_patch_submodule_doesnt_exist():
    mock = "__test_patch_submodule_doesnt_exist_mock__"
    with patch_submodule(_test_patching, "__module_that_doesn_exist__.__attribute_that_doesn_exist__", mock):
        pass
    with patch_submodule(_test_patching, "os.__attribute_that_doesn_exist__", mock):
        pass
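# A minimal sketch of the pattern these tests exercise: `patch_submodule` swaps
# an attribute reachable from the target module for the duration of a context
# (the lambda below is an arbitrary stand-in, like the mock strings above).
#
# with patch_submodule(_test_patching, "os.path.join", lambda *p: "/mocked"):
#     assert _test_patching.os.path.join("a", "b") == "/mocked"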
import secrets
from random import shuffle
from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation
def password_generator(length: int = 8) -> str:
    chars = ascii_letters + digits + punctuation
    return "".join(secrets.choice(chars) for _ in range(length))


def alternative_password_generator(chars_incl: str, i: int) -> str:
    # Password Generator = full boot with random_number, random_letters, and
    # random_character FUNCTIONS
    # Put your code here...
    i -= len(chars_incl)
    quotient = i // 3
    remainder = i % 3
    # chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) +
    #     random_number(digits, i / 3) + random_characters(punctuation, i / 3)
    chars = (
        chars_incl
        + random(ascii_letters, quotient + remainder)
        + random(digits, quotient)
        + random(punctuation, quotient)
    )
    list_of_chars = list(chars)
    shuffle(list_of_chars)
    return "".join(list_of_chars)


# random is a generalised function for letters, characters and numbers
def random(chars_incl: str, i: int) -> str:
    return "".join(secrets.choice(chars_incl) for _ in range(i))


def random_number(chars_incl, i):
    pass  # Put your code here...


def random_letters(chars_incl, i):
    pass  # Put your code here...


def random_characters(chars_incl, i):
    pass  # Put your code here...


def is_strong_password(password: str, min_length: int = 8) -> bool:
    if len(password) < min_length:
        # Your Password must be at least 8 characters long
        return False

    upper = any(char in ascii_uppercase for char in password)
    lower = any(char in ascii_lowercase for char in password)
    num = any(char in digits for char in password)
    spec_char = any(char in punctuation for char in password)

    return upper and lower and num and spec_char
    # Passwords should contain UPPERCASE, lowercase
    # numbers, and special characters


def main():
    length = int(input("Please indicate the max length of your password: ").strip())
    chars_incl = input("Please indicate the characters that must be in your password: ").strip()
    print("Password generated:", password_generator(length))
    print("Alternative Password generated:", alternative_password_generator(chars_incl, length))
    print("[If you are thinking of using this password, You better save it.]")


if __name__ == "__main__":
    main()
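# Examples for the strength checker above (its name, is_strong_password, is a
# restored guess; nothing else in this file references it):
# is_strong_password("Hwea7$2!")    # True: upper, lower, digit, special, length 8
# is_strong_password("hellow0rld")  # False: no uppercase and no special character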
"""simple docstring"""
import numpy as np
import datasets
__lowerCamelCase = "\nCompute the Mahalanobis Distance\n\nMahalonobis distance is the distance between a point and a distribution.\nAnd not between two distinct points. It is effectively a multivariate equivalent of the Euclidean distance.\nIt was introduced by Prof. P. C. Mahalanobis in 1936\nand has been used in various statistical applications ever since\n[source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/]\n"
__lowerCamelCase = "\\n@article{de2000mahalanobis,\n title={The mahalanobis distance},\n author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\'e}sir{\'e} L},\n journal={Chemometrics and intelligent laboratory systems},\n volume={50},\n number={1},\n pages={1--18},\n year={2000},\n publisher={Elsevier}\n}\n"
__lowerCamelCase = "\nArgs:\n X: List of datapoints to be compared with the `reference_distribution`.\n reference_distribution: List of datapoints from the reference distribution we want to compare to.\nReturns:\n mahalanobis: The Mahalonobis distance for each datapoint in `X`.\nExamples:\n\n >>> mahalanobis_metric = datasets.load_metric(\"mahalanobis\")\n >>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]])\n >>> print(results)\n {'mahalanobis': array([0.5])}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Mahalanobis(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "X": datasets.Sequence(datasets.Value("float", id="sequence"), id="X"),
                }
            ),
        )

    def _compute(self, X, reference_distribution):
        # convert to numpy arrays
        X = np.array(X)
        reference_distribution = np.array(reference_distribution)

        # Assert that arrays are 2D
        if len(X.shape) != 2:
            raise ValueError("Expected `X` to be a 2D vector")
        if len(reference_distribution.shape) != 2:
            raise ValueError("Expected `reference_distribution` to be a 2D vector")
        if reference_distribution.shape[0] < 2:
            raise ValueError(
                "Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension"
            )

        # Get mahalanobis distance for each prediction
        X_minus_mu = X - np.mean(reference_distribution)
        cov = np.cov(reference_distribution.T)
        try:
            inv_covmat = np.linalg.inv(cov)
        except np.linalg.LinAlgError:
            inv_covmat = np.linalg.pinv(cov)
        left_term = np.dot(X_minus_mu, inv_covmat)
        mahal_dist = np.dot(left_term, X_minus_mu.T).diagonal()
        return {"mahalanobis": mahal_dist}
"""simple docstring"""
import argparse
import os
import re
import numpy as np
import PIL
import torch
from timm import create_model
from torch.optim.lr_scheduler import OneCycleLR
from torch.utils.data import DataLoader, Dataset
from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor
from accelerate import Accelerator
def extract_label(fname):
    """Extract the class label from a filename like `<label>_<index>.jpg`."""
    stem = fname.split(os.path.sep)[-1]
    return re.search(r"^(.*)_\d+\.jpg$", stem).groups()[0]
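# Example (follows from the regex above, which strips a trailing _<index>.jpg):
# extract_label("images/great_pyrenees_123.jpg")  # -> "great_pyrenees"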
class PetsDataset(Dataset):
    def __init__(self, file_names, image_transform=None, label_to_id=None):
        self.file_names = file_names
        self.image_transform = image_transform
        self.label_to_id = label_to_id

    def __len__(self):
        return len(self.file_names)

    def __getitem__(self, idx):
        fname = self.file_names[idx]
        raw_image = PIL.Image.open(fname)
        image = raw_image.convert("RGB")
        if self.image_transform is not None:
            image = self.image_transform(image)
        label = extract_label(fname)
        if self.label_to_id is not None:
            label = self.label_to_id[label]
        return {"image": image, "label": label}
def UpperCAmelCase ( UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
if args.with_tracking:
A__ = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , log_with='all' , project_dir=args.project_dir )
else:
A__ = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
A__ = config['lr']
A__ = int(config['num_epochs'] )
A__ = int(config['seed'] )
A__ = int(config['batch_size'] )
A__ = config['image_size']
if not isinstance(UpperCamelCase__ , (list, tuple) ):
A__ = (image_size, image_size)
# Parse out whether we are saving every epoch or after a certain number of batches
if hasattr(args.checkpointing_steps , 'isdigit' ):
if args.checkpointing_steps == "epoch":
A__ = args.checkpointing_steps
elif args.checkpointing_steps.isdigit():
A__ = int(args.checkpointing_steps )
else:
raise ValueError(
F'''Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed.''' )
else:
A__ = None
# We need to initialize the trackers we use, and also store our configuration
if args.with_tracking:
A__ = os.path.split(UpperCamelCase__ )[-1].split('.' )[0]
accelerator.init_trackers(UpperCamelCase__ , UpperCamelCase__ )
# Grab all the image filenames
A__ = [os.path.join(args.data_dir , UpperCamelCase__ ) for fname in os.listdir(args.data_dir ) if fname.endswith('.jpg' )]
# Build the label correspondences
A__ = [extract_label(UpperCamelCase__ ) for fname in file_names]
A__ = list(set(UpperCamelCase__ ) )
id_to_label.sort()
A__ = {lbl: i for i, lbl in enumerate(UpperCamelCase__ )}
# Set the seed before splitting the data.
np.random.seed(UpperCamelCase__ )
torch.manual_seed(UpperCamelCase__ )
torch.cuda.manual_seed_all(UpperCamelCase__ )
# Split our filenames between train and validation
A__ = np.random.permutation(len(UpperCamelCase__ ) )
A__ = int(0.8 * len(UpperCamelCase__ ) )
A__ = random_perm[:cut]
A__ = random_perm[cut:]
# For training we use a simple RandomResizedCrop
A__ = Compose([RandomResizedCrop(UpperCamelCase__ , scale=(0.5, 1.0) ), ToTensor()] )
A__ = PetsDataset(
[file_names[i] for i in train_split] , image_transform=UpperCamelCase__ , label_to_id=UpperCamelCase__ )
# For evaluation, we use a deterministic Resize
A__ = Compose([Resize(UpperCamelCase__ ), ToTensor()] )
A__ = PetsDataset([file_names[i] for i in eval_split] , image_transform=UpperCamelCase__ , label_to_id=UpperCamelCase__ )
# Instantiate dataloaders.
A__ = DataLoader(UpperCamelCase__ , shuffle=UpperCamelCase__ , batch_size=UpperCamelCase__ , num_workers=4 )
A__ = DataLoader(UpperCamelCase__ , shuffle=UpperCamelCase__ , batch_size=UpperCamelCase__ , num_workers=4 )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
A__ = create_model('resnet50d' , pretrained=UpperCamelCase__ , num_classes=len(UpperCamelCase__ ) )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
A__ = model.to(accelerator.device )
# Freezing the base model
for param in model.parameters():
A__ = False
for param in model.get_classifier().parameters():
A__ = True
# We normalize the batches of images to be a bit faster.
A__ = torch.tensor(model.default_cfg['mean'] )[None, :, None, None].to(accelerator.device )
A__ = torch.tensor(model.default_cfg['std'] )[None, :, None, None].to(accelerator.device )
    # Instantiate optimizer
    optimizer = torch.optim.Adam(params=model.parameters(), lr=lr / 25)

    # Instantiate learning rate scheduler
    lr_scheduler = OneCycleLR(optimizer=optimizer, max_lr=lr, epochs=num_epochs, steps_per_epoch=len(train_dataloader))

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )
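    # `prepare` wraps the model for the current setup (e.g. DDP on multi-GPU), moves everything to
    # the right device, and shards the dataloaders so each process only sees its slice of the data.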
    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the starting epoch so files are named properly
    starting_epoch = 0

    # Potentially load in the weights and states from a previous save
    if args.resume_from_checkpoint is not None:
        # An explicit path loads that checkpoint; an empty string falls through to the most recent
        # folder. (The original `is not None or != ""` test made the auto-discovery branch unreachable.)
        if args.resume_from_checkpoint != "":
            accelerator.print(f"Resumed from checkpoint: {args.resume_from_checkpoint}")
            accelerator.load_state(args.resume_from_checkpoint)
            path = os.path.basename(args.resume_from_checkpoint)
        else:
            # Get the most recent checkpoint
            dirs = [f.name for f in os.scandir(os.getcwd()) if f.is_dir()]
            dirs.sort(key=os.path.getctime)
            path = dirs[-1]  # Sorts folders by date modified, most recent checkpoint is the last
        # Extract `epoch_{i}` or `step_{i}`
        training_difference = os.path.splitext(path)[0]

        if "epoch" in training_difference:
            starting_epoch = int(training_difference.replace("epoch_", "")) + 1
            resume_step = None
        else:
            resume_step = int(training_difference.replace("step_", ""))
            starting_epoch = resume_step // len(train_dataloader)
            resume_step -= starting_epoch * len(train_dataloader)
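            # Illustrative arithmetic: resuming from `step_250` with 100 batches per epoch gives
            # starting_epoch = 250 // 100 = 2 and resume_step = 250 - 2 * 100 = 50, i.e. we restart
            # in epoch 2 and skip its first 50 batches.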
    # Now we train the model
    for epoch in range(starting_epoch, num_epochs):
        model.train()
        if args.with_tracking:
            total_loss = 0
        if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None:
            # We need to skip steps until we reach the resumed step
            active_dataloader = accelerator.skip_first_batches(train_dataloader, resume_step)
            overall_step += resume_step
        else:
            # After the first iteration though, we need to go back to the original dataloader
            active_dataloader = train_dataloader
        for batch in active_dataloader:
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch = {k: v.to(accelerator.device) for k, v in batch.items()}
            inputs = (batch["image"] - mean) / std
            outputs = model(inputs)
            loss = torch.nn.functional.cross_entropy(outputs, batch["label"])
            # We keep track of the loss at each epoch
            if args.with_tracking:
                total_loss += loss.detach().float()
            accelerator.backward(loss)
            optimizer.step()
            lr_scheduler.step()
            optimizer.zero_grad()
            overall_step += 1

            if isinstance(checkpointing_steps, int):
                output_dir = f"step_{overall_step}"
                if overall_step % checkpointing_steps == 0:
                    if args.output_dir is not None:
                        output_dir = os.path.join(args.output_dir, output_dir)
                    accelerator.save_state(output_dir)
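        # End-of-epoch evaluation: `gather_for_metrics` below collects predictions from all
        # processes and drops the duplicated samples a distributed sampler may have padded in.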
        model.eval()
        accurate = 0
        num_elems = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch = {k: v.to(accelerator.device) for k, v in batch.items()}
            inputs = (batch["image"] - mean) / std
            with torch.no_grad():
                outputs = model(inputs)
            predictions = outputs.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["label"]))
            accurate_preds = predictions == references
            num_elems += accurate_preds.shape[0]
            accurate += accurate_preds.long().sum()

        eval_metric = accurate.item() / num_elems
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}: {100 * eval_metric:.2f}")
        if args.with_tracking:
            accelerator.log(
                {
                    "accuracy": 100 * eval_metric,
                    "train_loss": total_loss.item() / len(train_dataloader),
                    "epoch": epoch,
                },
                step=overall_step,
            )
        if checkpointing_steps == "epoch":
            output_dir = f"epoch_{epoch}"
            if args.output_dir is not None:
                output_dir = os.path.join(args.output_dir, output_dir)
            accelerator.save_state(output_dir)

    if args.with_tracking:
        accelerator.end_training()
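
# Note: `accelerator.save_state` writes the model, optimizer, scheduler and RNG states into each
# checkpoint folder, which is exactly what `load_state` restores on `--resume_from_checkpoint`.
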
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument("--data_dir", required=True, help="The data folder on disk.")
    parser.add_argument("--fp16", action="store_true", help="If passed, will use FP16 training.")
    parser.add_argument(
        "--mixed_precision", type=str, default=None, choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose between fp16 and bf16 (bfloat16). "
        "Bf16 requires PyTorch >= 1.10 and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    parser.add_argument(
        "--checkpointing_steps", type=str, default=None,
        help="Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.",
    )
    parser.add_argument(
        "--output_dir", type=str, default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--resume_from_checkpoint", type=str, default=None,
        help="If the training should continue from a checkpoint folder.",
    )
    parser.add_argument(
        "--with_tracking", action="store_true",
        help="Whether to load in all available experiment trackers from the environment and use them for logging.",
    )
    parser.add_argument(
        "--project_dir", type=str, default="logs",
        help="Where to store experiment tracking logs and relevant project information.",
    )
    args = parser.parse_args()
    config = {"lr": 3e-2, "num_epochs": 3, "seed": 42, "batch_size": 64, "image_size": 224}
    training_function(config, args)
if __name__ == "__main__":
    main()
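
# Example invocation (illustrative; assumes this file is saved as `cv_example.py` and that
# `--data_dir` points at a folder of Oxford-IIIT Pets style `.jpg` files):
#   accelerate launch cv_example.py --data_dir ./images --checkpointing_steps epoch --with_tracking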