code
stringlengths
81
54k
code_codestyle
int64
0
721
style_context
stringlengths
91
41.9k
style_context_codestyle
int64
0
699
label
int64
0
1
import warnings from typing import List import numpy as np from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding from ...utils import is_flax_available, is_tf_available, is_torch_available class UpperCAmelCase_ ( __a ): '''simple docstring''' a__ = ["image_processor", "tokenizer"] a__ = "OwlViTImageProcessor" a__ = ("CLIPTokenizer", "CLIPTokenizerFast") def __init__( self : List[Any] , UpperCamelCase__ : int=None , UpperCamelCase__ : Any=None , **UpperCamelCase__ : Tuple ) -> Optional[int]: __magic_name__ = None if "feature_extractor" in kwargs: warnings.warn( """The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`""" """ instead.""" , lowerCAmelCase_ , ) __magic_name__ = kwargs.pop("""feature_extractor""" ) __magic_name__ = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("""You need to specify an `image_processor`.""" ) if tokenizer is None: raise ValueError("""You need to specify a `tokenizer`.""" ) super().__init__(lowerCAmelCase_ , lowerCAmelCase_ ) def __call__( self : List[Any] , UpperCamelCase__ : Union[str, Any]=None , UpperCamelCase__ : Dict=None , UpperCamelCase__ : Union[str, Any]=None , UpperCamelCase__ : Union[str, Any]="max_length" , UpperCamelCase__ : List[str]="np" , **UpperCamelCase__ : Union[str, Any] ) -> int: if text is None and query_images is None and images is None: raise ValueError( """You have to specify at least one text or query image or image. All three cannot be none.""" ) if text is not None: if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) or (isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and not isinstance(text[0] , lowerCAmelCase_ )): __magic_name__ = [self.tokenizer(lowerCAmelCase_ , padding=lowerCAmelCase_ , return_tensors=lowerCAmelCase_ , **lowerCAmelCase_ )] elif isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and isinstance(text[0] , lowerCAmelCase_ ): __magic_name__ = [] # Maximum number of queries across batch __magic_name__ = max([len(lowerCAmelCase_ ) for t in text] ) # Pad all batch samples to max number of text queries for t in text: if len(lowerCAmelCase_ ) != max_num_queries: __magic_name__ = t + [""" """] * (max_num_queries - len(lowerCAmelCase_ )) __magic_name__ = self.tokenizer(lowerCAmelCase_ , padding=lowerCAmelCase_ , return_tensors=lowerCAmelCase_ , **lowerCAmelCase_ ) encodings.append(lowerCAmelCase_ ) else: raise TypeError("""Input text should be a string, a list of strings or a nested list of strings""" ) if return_tensors == "np": __magic_name__ = np.concatenate([encoding["""input_ids"""] for encoding in encodings] , axis=0 ) __magic_name__ = np.concatenate([encoding["""attention_mask"""] for encoding in encodings] , axis=0 ) elif return_tensors == "jax" and is_flax_available(): import jax.numpy as jnp __magic_name__ = jnp.concatenate([encoding["""input_ids"""] for encoding in encodings] , axis=0 ) __magic_name__ = jnp.concatenate([encoding["""attention_mask"""] for encoding in encodings] , axis=0 ) elif return_tensors == "pt" and is_torch_available(): import torch __magic_name__ = torch.cat([encoding["""input_ids"""] for encoding in encodings] , dim=0 ) __magic_name__ = torch.cat([encoding["""attention_mask"""] for encoding in encodings] , dim=0 ) elif return_tensors == "tf" and is_tf_available(): import tensorflow as tf __magic_name__ = tf.stack([encoding["""input_ids"""] for encoding in encodings] , axis=0 ) __magic_name__ = tf.stack([encoding["""attention_mask"""] for encoding in encodings] , axis=0 ) else: raise ValueError("""Target return tensor type could not be returned""" ) __magic_name__ = BatchEncoding() __magic_name__ = input_ids __magic_name__ = attention_mask if query_images is not None: __magic_name__ = BatchEncoding() __magic_name__ = self.image_processor( lowerCAmelCase_ , return_tensors=lowerCAmelCase_ , **lowerCAmelCase_ ).pixel_values __magic_name__ = query_pixel_values if images is not None: __magic_name__ = self.image_processor(lowerCAmelCase_ , return_tensors=lowerCAmelCase_ , **lowerCAmelCase_ ) if text is not None and images is not None: __magic_name__ = image_features.pixel_values return encoding elif query_images is not None and images is not None: __magic_name__ = image_features.pixel_values return encoding elif text is not None or query_images is not None: return encoding else: return BatchEncoding(data=dict(**lowerCAmelCase_ ) , tensor_type=lowerCAmelCase_ ) def _lowercase ( self : Optional[int] , *UpperCamelCase__ : List[str] , **UpperCamelCase__ : Dict ) -> int: return self.image_processor.post_process(*lowerCAmelCase_ , **lowerCAmelCase_ ) def _lowercase ( self : int , *UpperCamelCase__ : Optional[int] , **UpperCamelCase__ : Optional[Any] ) -> int: return self.image_processor.post_process_object_detection(*lowerCAmelCase_ , **lowerCAmelCase_ ) def _lowercase ( self : List[Any] , *UpperCamelCase__ : List[str] , **UpperCamelCase__ : Tuple ) -> Dict: return self.image_processor.post_process_image_guided_detection(*lowerCAmelCase_ , **lowerCAmelCase_ ) def _lowercase ( self : Any , *UpperCamelCase__ : Optional[int] , **UpperCamelCase__ : List[Any] ) -> Optional[int]: return self.tokenizer.batch_decode(*lowerCAmelCase_ , **lowerCAmelCase_ ) def _lowercase ( self : Optional[int] , *UpperCamelCase__ : int , **UpperCamelCase__ : List[Any] ) -> Tuple: return self.tokenizer.decode(*lowerCAmelCase_ , **lowerCAmelCase_ ) @property def _lowercase ( self : Dict ) -> Union[str, Any]: warnings.warn( """`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , lowerCAmelCase_ , ) return self.image_processor_class @property def _lowercase ( self : List[Any] ) -> List[Any]: warnings.warn( """`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" , lowerCAmelCase_ , ) return self.image_processor
719
from collections import deque from .hash_table import HashTable class UpperCAmelCase_ ( _A ): '''simple docstring''' def __init__( self : int , *UpperCamelCase__ : Union[str, Any] , **UpperCamelCase__ : Optional[Any] ) -> Optional[Any]: """simple docstring""" super().__init__(*UpperCamelCase__ , **UpperCamelCase__ ) def _lowercase ( self : Optional[Any] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Any ) -> Dict: """simple docstring""" __magic_name__ = deque([] ) if self.values[key] is None else self.values[key] self.values[key].appendleft(UpperCamelCase__ ) __magic_name__ = self.values[key] def _lowercase ( self : List[str] ) -> int: """simple docstring""" return ( sum(self.charge_factor - len(UpperCamelCase__ ) for slot in self.values ) / self.size_table * self.charge_factor ) def _lowercase ( self : Optional[Any] , UpperCamelCase__ : str , UpperCamelCase__ : Tuple=None ) -> str: """simple docstring""" if not ( len(self.values[key] ) == self.charge_factor and self.values.count(UpperCamelCase__ ) == 0 ): return key return super()._collision_resolution(UpperCamelCase__ , UpperCamelCase__ )
76
0
import json import os import unittest from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import ( VOCAB_FILES_NAMES, GPTSanJapaneseTokenizer, ) from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class UpperCAmelCase_ ( __lowercase , unittest.TestCase ): '''simple docstring''' a__ = GPTSanJapaneseTokenizer a__ = False a__ = {'''do_clean_text''': False, '''add_prefix_space''': False} def _lowercase ( self : List[Any] ) -> int: """simple docstring""" super().setUp() # fmt: off __magic_name__ = ["""こん""", """こんに""", """にちは""", """ばんは""", """世界,㔺界""", """、""", """。""", """<BR>""", """<SP>""", """<TAB>""", """<URL>""", """<EMAIL>""", """<TEL>""", """<DATE>""", """<PRICE>""", """<BLOCK>""", """<KIGOU>""", """<U2000U2BFF>""", """<|emoji1|>""", """<unk>""", """<|bagoftoken|>""", """<|endoftext|>"""] # fmt: on __magic_name__ = {"""emoji""": {"""\ud83d\ude00""": """<|emoji1|>"""}, """emoji_inv""": {"""<|emoji1|>""": """\ud83d\ude00"""}} # 😀 __magic_name__ = {"""unk_token""": """<unk>"""} __magic_name__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] ) __magic_name__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""emoji_file"""] ) with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer: vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) ) with open(self.emoji_file , """w""" ) as emoji_writer: emoji_writer.write(json.dumps(__a ) ) def _lowercase ( self : List[str] , **UpperCamelCase__ : Any ) -> str: """simple docstring""" kwargs.update(self.special_tokens_map ) return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname , **__a ) def _lowercase ( self : List[Any] , UpperCamelCase__ : List[str] ) -> int: """simple docstring""" __magic_name__ = """こんにちは、世界。 \nこんばんは、㔺界。😀""" __magic_name__ = """こんにちは、世界。 \nこんばんは、世界。😀""" return input_text, output_text def _lowercase ( self : Optional[int] , UpperCamelCase__ : Any ) -> str: """simple docstring""" __magic_name__ = self.get_input_output_texts(__a ) __magic_name__ = tokenizer.encode(__a , add_special_tokens=__a ) __magic_name__ = tokenizer.decode(__a , clean_up_tokenization_spaces=__a ) return text, ids def _lowercase ( self : List[str] ) -> Tuple: """simple docstring""" pass # TODO add if relevant def _lowercase ( self : Dict ) -> Any: """simple docstring""" pass # TODO add if relevant def _lowercase ( self : int ) -> Optional[Any]: """simple docstring""" pass # TODO add if relevant def _lowercase ( self : Optional[Any] ) -> str: """simple docstring""" __magic_name__ = self.get_tokenizer() # Testing tokenization __magic_name__ = """こんにちは、世界。 こんばんは、㔺界。""" __magic_name__ = ["""こん""", """にちは""", """、""", """世界""", """。""", """<SP>""", """こん""", """ばんは""", """、""", """㔺界""", """。"""] __magic_name__ = tokenizer.tokenize(__a ) self.assertListEqual(__a , __a ) # Testing conversion to ids without special tokens __magic_name__ = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6] __magic_name__ = tokenizer.convert_tokens_to_ids(__a ) self.assertListEqual(__a , __a ) # Testing conversion to ids with special tokens __magic_name__ = tokens + [tokenizer.unk_token] __magic_name__ = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19] __magic_name__ = tokenizer.convert_tokens_to_ids(__a ) self.assertListEqual(__a , __a ) def _lowercase ( self : Dict ) -> Any: """simple docstring""" __magic_name__ = self.get_tokenizer() # Testing tokenization __magic_name__ = """こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。""" __magic_name__ = """こんにちは、、、、世界。こんばんは、、、、世界。""" __magic_name__ = tokenizer.encode(__a ) __magic_name__ = tokenizer.decode(__a ) self.assertEqual(__a , __a ) @slow def _lowercase ( self : int ) -> List[Any]: """simple docstring""" __magic_name__ = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" ) # Testing tokenization __magic_name__ = """こんにちは、世界。""" __magic_name__ = """こんばんは、㔺界。😀""" __magic_name__ = """こんにちは、世界。こんばんは、世界。😀""" __magic_name__ = tokenizer.encode(prefix_text + input_text ) __magic_name__ = tokenizer.encode("""""" , prefix_text=prefix_text + input_text ) __magic_name__ = tokenizer.encode(__a , prefix_text=__a ) __magic_name__ = tokenizer.decode(__a ) __magic_name__ = tokenizer.decode(__a ) __magic_name__ = tokenizer.decode(__a ) self.assertEqual(__a , __a ) self.assertEqual(__a , __a ) self.assertEqual(__a , __a ) @slow def _lowercase ( self : Tuple ) -> Any: """simple docstring""" __magic_name__ = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" ) # Testing tokenization __magic_name__ = """こんにちは、世界。""" __magic_name__ = """こんばんは、㔺界。😀""" __magic_name__ = len(tokenizer.encode(__a ) ) - 2 __magic_name__ = len(tokenizer.encode(__a ) ) - 2 __magic_name__ = [1] + [0] * (len_prefix + len_text + 1) __magic_name__ = [1] * (len_prefix + len_text + 1) + [0] __magic_name__ = [1] + [1] * (len_prefix) + [0] * (len_text + 1) __magic_name__ = tokenizer(prefix_text + input_text ).token_type_ids __magic_name__ = tokenizer("""""" , prefix_text=prefix_text + input_text ).token_type_ids __magic_name__ = tokenizer(__a , prefix_text=__a ).token_type_ids self.assertListEqual(__a , __a ) self.assertListEqual(__a , __a ) self.assertListEqual(__a , __a ) @slow def _lowercase ( self : List[str] ) -> Tuple: """simple docstring""" __magic_name__ = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" ) __magic_name__ = tokenizer.encode("""あンいワ""" ) __magic_name__ = tokenizer.encode("""""" , prefix_text="""あンいワ""" ) __magic_name__ = tokenizer.encode("""いワ""" , prefix_text="""あン""" ) self.assertEqual(tokenizer.decode(__a ) , tokenizer.decode(__a ) ) self.assertEqual(tokenizer.decode(__a ) , tokenizer.decode(__a ) ) self.assertNotEqual(__a , __a ) self.assertNotEqual(__a , __a ) self.assertEqual(x_token_a[1] , x_token_a[-1] ) # SEG token self.assertEqual(x_token_a[1] , x_token_a[3] ) # SEG token @slow def _lowercase ( self : Optional[int] ) -> Tuple: """simple docstring""" __magic_name__ = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" ) __magic_name__ = [["""武田信玄""", """は、"""], ["""織田信長""", """の配下の、"""]] __magic_name__ = tokenizer(__a , padding=__a ) __magic_name__ = tokenizer.batch_encode_plus(__a , padding=__a ) # fmt: off __magic_name__ = [[3_5993, 8640, 2_5948, 3_5998, 3_0647, 3_5675, 3_5999, 3_5999], [3_5993, 1_0382, 9868, 3_5998, 3_0646, 9459, 3_0646, 3_5675]] __magic_name__ = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]] __magic_name__ = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]] # fmt: on self.assertListEqual(x_token.input_ids , __a ) self.assertListEqual(x_token.token_type_ids , __a ) self.assertListEqual(x_token.attention_mask , __a ) self.assertListEqual(x_token_a.input_ids , __a ) self.assertListEqual(x_token_a.token_type_ids , __a ) self.assertListEqual(x_token_a.attention_mask , __a ) def _lowercase ( self : Any ) -> Optional[Any]: """simple docstring""" pass def _lowercase ( self : Tuple ) -> Union[str, Any]: """simple docstring""" pass
720
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import os from ...utils.constants import SAGEMAKER_PARALLEL_EC2_INSTANCES, TORCH_DYNAMO_MODES from ...utils.dataclasses import ComputeEnvironment, SageMakerDistributedType from ...utils.imports import is_botoa_available from .config_args import SageMakerConfig from .config_utils import ( DYNAMO_BACKENDS, _ask_field, _ask_options, _convert_dynamo_backend, _convert_mixed_precision, _convert_sagemaker_distributed_mode, _convert_yes_no_to_bool, ) if is_botoa_available(): import botoa # noqa: F401 def a__ ( A_ ): '''simple docstring''' __magic_name__ = botoa.client("""iam""" ) __magic_name__ = { """Version""": """2012-10-17""", """Statement""": [ {"""Effect""": """Allow""", """Principal""": {"""Service""": """sagemaker.amazonaws.com"""}, """Action""": """sts:AssumeRole"""} ], } try: # create the role, associated with the chosen trust policy iam_client.create_role( RoleName=A_, AssumeRolePolicyDocument=json.dumps(A_, indent=2 ) ) __magic_name__ = { """Version""": """2012-10-17""", """Statement""": [ { """Effect""": """Allow""", """Action""": [ """sagemaker:*""", """ecr:GetDownloadUrlForLayer""", """ecr:BatchGetImage""", """ecr:BatchCheckLayerAvailability""", """ecr:GetAuthorizationToken""", """cloudwatch:PutMetricData""", """cloudwatch:GetMetricData""", """cloudwatch:GetMetricStatistics""", """cloudwatch:ListMetrics""", """logs:CreateLogGroup""", """logs:CreateLogStream""", """logs:DescribeLogStreams""", """logs:PutLogEvents""", """logs:GetLogEvents""", """s3:CreateBucket""", """s3:ListBucket""", """s3:GetBucketLocation""", """s3:GetObject""", """s3:PutObject""", ], """Resource""": """*""", } ], } # attach policy to role iam_client.put_role_policy( RoleName=A_, PolicyName=f'''{role_name}_policy_permission''', PolicyDocument=json.dumps(A_, indent=2 ), ) except iam_client.exceptions.EntityAlreadyExistsException: print(f'''role {role_name} already exists. Using existing one''' ) def a__ ( A_ ): '''simple docstring''' __magic_name__ = botoa.client("""iam""" ) return iam_client.get_role(RoleName=A_ )["Role"]["Arn"] def a__ ( ): '''simple docstring''' __magic_name__ = _ask_options( """How do you want to authorize?""", ["""AWS Profile""", """Credentials (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY) """], A_, ) __magic_name__ = None if credentials_configuration == 0: __magic_name__ = _ask_field("""Enter your AWS Profile name: [default] """, default="""default""" ) __magic_name__ = aws_profile else: print( """Note you will need to provide AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY when you launch you training script with,""" """`accelerate launch --aws_access_key_id XXX --aws_secret_access_key YYY`""" ) __magic_name__ = _ask_field("""AWS Access Key ID: """ ) __magic_name__ = aws_access_key_id __magic_name__ = _ask_field("""AWS Secret Access Key: """ ) __magic_name__ = aws_secret_access_key __magic_name__ = _ask_field("""Enter your AWS Region: [us-east-1]""", default="""us-east-1""" ) __magic_name__ = aws_region __magic_name__ = _ask_options( """Do you already have an IAM Role for executing Amazon SageMaker Training Jobs?""", ["""Provide IAM Role name""", """Create new IAM role using credentials"""], A_, ) if role_management == 0: __magic_name__ = _ask_field("""Enter your IAM role name: """ ) else: __magic_name__ = """accelerate_sagemaker_execution_role""" print(f'''Accelerate will create an iam role "{iam_role_name}" using the provided credentials''' ) _create_iam_role_for_sagemaker(A_ ) __magic_name__ = _ask_field( """Do you want to use custom Docker image? [yes/NO]: """, _convert_yes_no_to_bool, default=A_, error_message="""Please enter yes or no.""", ) __magic_name__ = None if is_custom_docker_image: __magic_name__ = _ask_field("""Enter your Docker image: """, lambda A_ : str(A_ ).lower() ) __magic_name__ = _ask_field( """Do you want to provide SageMaker input channels with data locations? [yes/NO]: """, _convert_yes_no_to_bool, default=A_, error_message="""Please enter yes or no.""", ) __magic_name__ = None if is_sagemaker_inputs_enabled: __magic_name__ = _ask_field( """Enter the path to the SageMaker inputs TSV file with columns (channel_name, data_location): """, lambda A_ : str(A_ ).lower(), ) __magic_name__ = _ask_field( """Do you want to enable SageMaker metrics? [yes/NO]: """, _convert_yes_no_to_bool, default=A_, error_message="""Please enter yes or no.""", ) __magic_name__ = None if is_sagemaker_metrics_enabled: __magic_name__ = _ask_field( """Enter the path to the SageMaker metrics TSV file with columns (metric_name, metric_regex): """, lambda A_ : str(A_ ).lower(), ) __magic_name__ = _ask_options( """What is the distributed mode?""", ["""No distributed training""", """Data parallelism"""], _convert_sagemaker_distributed_mode, ) __magic_name__ = {} __magic_name__ = _ask_field( """Do you wish to optimize your script with torch dynamo?[yes/NO]:""", _convert_yes_no_to_bool, default=A_, error_message="""Please enter yes or no.""", ) if use_dynamo: __magic_name__ = """dynamo_""" __magic_name__ = _ask_options( """Which dynamo backend would you like to use?""", [x.lower() for x in DYNAMO_BACKENDS], _convert_dynamo_backend, default=2, ) __magic_name__ = _ask_field( """Do you want to customize the defaults sent to torch.compile? [yes/NO]: """, _convert_yes_no_to_bool, default=A_, error_message="""Please enter yes or no.""", ) if use_custom_options: __magic_name__ = _ask_options( """Which mode do you want to use?""", A_, lambda A_ : TORCH_DYNAMO_MODES[int(A_ )], default="""default""", ) __magic_name__ = _ask_field( """Do you want the fullgraph mode or it is ok to break model into several subgraphs? [yes/NO]: """, _convert_yes_no_to_bool, default=A_, error_message="""Please enter yes or no.""", ) __magic_name__ = _ask_field( """Do you want to enable dynamic shape tracing? [yes/NO]: """, _convert_yes_no_to_bool, default=A_, error_message="""Please enter yes or no.""", ) __magic_name__ = """Which EC2 instance type you want to use for your training?""" if distributed_type != SageMakerDistributedType.NO: __magic_name__ = _ask_options( A_, A_, lambda A_ : SAGEMAKER_PARALLEL_EC2_INSTANCES[int(A_ )] ) else: eca_instance_query += "? [ml.p3.2xlarge]:" __magic_name__ = _ask_field(A_, lambda A_ : str(A_ ).lower(), default="""ml.p3.2xlarge""" ) __magic_name__ = 1 if distributed_type in (SageMakerDistributedType.DATA_PARALLEL, SageMakerDistributedType.MODEL_PARALLEL): __magic_name__ = _ask_field( """How many machines do you want use? [1]: """, A_, default=1, ) __magic_name__ = _ask_options( """Do you wish to use FP16 or BF16 (mixed precision)?""", ["""no""", """fp16""", """bf16""", """fp8"""], _convert_mixed_precision, ) if use_dynamo and mixed_precision == "no": print( """Torch dynamo used without mixed precision requires TF32 to be efficient. Accelerate will enable it by default when launching your scripts.""" ) return SageMakerConfig( image_uri=A_, compute_environment=ComputeEnvironment.AMAZON_SAGEMAKER, distributed_type=A_, use_cpu=A_, dynamo_config=A_, eca_instance_type=A_, profile=A_, region=A_, iam_role_name=A_, mixed_precision=A_, num_machines=A_, sagemaker_inputs_file=A_, sagemaker_metrics_file=A_, )
76
0
from collections import OrderedDict from typing import TYPE_CHECKING, Any, Mapping, Optional from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...onnx.utils import compute_effective_axis_dimension from ...utils import logging if TYPE_CHECKING: from ...processing_utils import ProcessorMixin from ...utils import TensorType __lowerCAmelCase : int = logging.get_logger(__name__) __lowerCAmelCase : Optional[int] = { "microsoft/layoutlmv3-base": "https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json", } class UpperCAmelCase_ ( _A ): '''simple docstring''' a__ = """layoutlmv3""" def __init__( self : Union[str, Any] , UpperCamelCase__ : Tuple=5_0265 , UpperCamelCase__ : Union[str, Any]=768 , UpperCamelCase__ : List[Any]=12 , UpperCamelCase__ : Union[str, Any]=12 , UpperCamelCase__ : Optional[Any]=3072 , UpperCamelCase__ : Optional[Any]="gelu" , UpperCamelCase__ : List[Any]=0.1 , UpperCamelCase__ : Any=0.1 , UpperCamelCase__ : Any=512 , UpperCamelCase__ : int=2 , UpperCamelCase__ : Dict=0.02 , UpperCamelCase__ : List[str]=1E-5 , UpperCamelCase__ : Any=1 , UpperCamelCase__ : Tuple=0 , UpperCamelCase__ : str=2 , UpperCamelCase__ : List[str]=1024 , UpperCamelCase__ : str=128 , UpperCamelCase__ : List[Any]=128 , UpperCamelCase__ : str=True , UpperCamelCase__ : Union[str, Any]=32 , UpperCamelCase__ : List[Any]=128 , UpperCamelCase__ : str=64 , UpperCamelCase__ : Optional[Any]=256 , UpperCamelCase__ : Tuple=True , UpperCamelCase__ : Optional[Any]=True , UpperCamelCase__ : List[Any]=True , UpperCamelCase__ : Union[str, Any]=224 , UpperCamelCase__ : Tuple=3 , UpperCamelCase__ : Optional[Any]=16 , UpperCamelCase__ : Union[str, Any]=None , **UpperCamelCase__ : Any , ) -> Optional[Any]: """simple docstring""" super().__init__( vocab_size=UpperCAmelCase__ , hidden_size=UpperCAmelCase__ , num_hidden_layers=UpperCAmelCase__ , num_attention_heads=UpperCAmelCase__ , intermediate_size=UpperCAmelCase__ , hidden_act=UpperCAmelCase__ , hidden_dropout_prob=UpperCAmelCase__ , attention_probs_dropout_prob=UpperCAmelCase__ , max_position_embeddings=UpperCAmelCase__ , type_vocab_size=UpperCAmelCase__ , initializer_range=UpperCAmelCase__ , layer_norm_eps=UpperCAmelCase__ , pad_token_id=UpperCAmelCase__ , bos_token_id=UpperCAmelCase__ , eos_token_id=UpperCAmelCase__ , **UpperCAmelCase__ , ) __magic_name__ = max_ad_position_embeddings __magic_name__ = coordinate_size __magic_name__ = shape_size __magic_name__ = has_relative_attention_bias __magic_name__ = rel_pos_bins __magic_name__ = max_rel_pos __magic_name__ = has_spatial_attention_bias __magic_name__ = rel_ad_pos_bins __magic_name__ = max_rel_ad_pos __magic_name__ = text_embed __magic_name__ = visual_embed __magic_name__ = input_size __magic_name__ = num_channels __magic_name__ = patch_size __magic_name__ = classifier_dropout class UpperCAmelCase_ ( _A ): '''simple docstring''' a__ = version.parse("""1.12""" ) @property def _lowercase ( self : Union[str, Any] ) -> Mapping[str, Mapping[int, str]]: """simple docstring""" if self.task in ["question-answering", "sequence-classification"]: return OrderedDict( [ ("""input_ids""", {0: """batch""", 1: """sequence"""}), ("""attention_mask""", {0: """batch""", 1: """sequence"""}), ("""bbox""", {0: """batch""", 1: """sequence"""}), ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}), ] ) else: return OrderedDict( [ ("""input_ids""", {0: """batch""", 1: """sequence"""}), ("""bbox""", {0: """batch""", 1: """sequence"""}), ("""attention_mask""", {0: """batch""", 1: """sequence"""}), ("""pixel_values""", {0: """batch""", 1: """num_channels"""}), ] ) @property def _lowercase ( self : List[Any] ) -> float: """simple docstring""" return 1E-5 @property def _lowercase ( self : List[str] ) -> int: """simple docstring""" return 12 def _lowercase ( self : str , UpperCamelCase__ : "ProcessorMixin" , UpperCamelCase__ : int = -1 , UpperCamelCase__ : int = -1 , UpperCamelCase__ : bool = False , UpperCamelCase__ : Optional["TensorType"] = None , UpperCamelCase__ : int = 3 , UpperCamelCase__ : int = 40 , UpperCamelCase__ : int = 40 , ) -> Mapping[str, Any]: """simple docstring""" setattr(processor.image_processor , """apply_ocr""" , UpperCAmelCase__ ) # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX __magic_name__ = compute_effective_axis_dimension( UpperCAmelCase__ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 ) # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX __magic_name__ = processor.tokenizer.num_special_tokens_to_add(UpperCAmelCase__ ) __magic_name__ = compute_effective_axis_dimension( UpperCAmelCase__ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=UpperCAmelCase__ ) # Generate dummy inputs according to compute batch and sequence __magic_name__ = [[""" """.join([processor.tokenizer.unk_token] ) * seq_length]] * batch_size # Generate dummy bounding boxes __magic_name__ = [[[48, 84, 73, 128]]] * batch_size # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX # batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch) __magic_name__ = self._generate_dummy_images(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) __magic_name__ = dict( processor( UpperCAmelCase__ , text=UpperCAmelCase__ , boxes=UpperCAmelCase__ , return_tensors=UpperCAmelCase__ , ) ) return inputs
721
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, convert_to_rgb, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging __lowerCAmelCase : Dict = logging.get_logger(__name__) if is_vision_available(): import PIL class UpperCAmelCase_ ( _A ): '''simple docstring''' a__ = ["""pixel_values"""] def __init__( self : Optional[Any] , UpperCamelCase__ : bool = True , UpperCamelCase__ : Dict[str, int] = None , UpperCamelCase__ : PILImageResampling = PILImageResampling.BICUBIC , UpperCamelCase__ : bool = True , UpperCamelCase__ : Dict[str, int] = None , UpperCamelCase__ : bool = True , UpperCamelCase__ : Union[int, float] = 1 / 255 , UpperCamelCase__ : bool = True , UpperCamelCase__ : Optional[Union[float, List[float]]] = None , UpperCamelCase__ : Optional[Union[float, List[float]]] = None , UpperCamelCase__ : bool = True , **UpperCamelCase__ : int , ) -> None: """simple docstring""" super().__init__(**UpperCamelCase__ ) __magic_name__ = size if size is not None else {"""shortest_edge""": 224} __magic_name__ = get_size_dict(UpperCamelCase__ , default_to_square=UpperCamelCase__ ) __magic_name__ = crop_size if crop_size is not None else {"""height""": 224, """width""": 224} __magic_name__ = get_size_dict(UpperCamelCase__ , default_to_square=UpperCamelCase__ , param_name="""crop_size""" ) __magic_name__ = do_resize __magic_name__ = size __magic_name__ = resample __magic_name__ = do_center_crop __magic_name__ = crop_size __magic_name__ = do_rescale __magic_name__ = rescale_factor __magic_name__ = do_normalize __magic_name__ = image_mean if image_mean is not None else OPENAI_CLIP_MEAN __magic_name__ = image_std if image_std is not None else OPENAI_CLIP_STD __magic_name__ = do_convert_rgb def _lowercase ( self : Union[str, Any] , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : Dict[str, int] , UpperCamelCase__ : PILImageResampling = PILImageResampling.BICUBIC , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : Tuple , ) -> np.ndarray: """simple docstring""" __magic_name__ = get_size_dict(UpperCamelCase__ , default_to_square=UpperCamelCase__ ) if "shortest_edge" not in size: raise ValueError(F'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''' ) __magic_name__ = get_resize_output_image_size(UpperCamelCase__ , size=size["""shortest_edge"""] , default_to_square=UpperCamelCase__ ) return resize(UpperCamelCase__ , size=UpperCamelCase__ , resample=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__ ) def _lowercase ( self : Tuple , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : Dict[str, int] , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : Tuple , ) -> np.ndarray: """simple docstring""" __magic_name__ = get_size_dict(UpperCamelCase__ ) if "height" not in size or "width" not in size: raise ValueError(F'''The `size` parameter must contain the keys (height, width). Got {size.keys()}''' ) return center_crop(UpperCamelCase__ , size=(size["""height"""], size["""width"""]) , data_format=UpperCamelCase__ , **UpperCamelCase__ ) def _lowercase ( self : Any , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : Union[int, float] , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : Optional[Any] , ) -> Optional[int]: """simple docstring""" return rescale(UpperCamelCase__ , scale=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__ ) def _lowercase ( self : Union[str, Any] , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : Union[float, List[float]] , UpperCamelCase__ : Union[float, List[float]] , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : Dict , ) -> np.ndarray: """simple docstring""" return normalize(UpperCamelCase__ , mean=UpperCamelCase__ , std=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__ ) def _lowercase ( self : List[Any] , UpperCamelCase__ : ImageInput , UpperCamelCase__ : bool = None , UpperCamelCase__ : Dict[str, int] = None , UpperCamelCase__ : PILImageResampling = None , UpperCamelCase__ : bool = None , UpperCamelCase__ : int = None , UpperCamelCase__ : bool = None , UpperCamelCase__ : float = None , UpperCamelCase__ : bool = None , UpperCamelCase__ : Optional[Union[float, List[float]]] = None , UpperCamelCase__ : Optional[Union[float, List[float]]] = None , UpperCamelCase__ : bool = None , UpperCamelCase__ : Optional[Union[str, TensorType]] = None , UpperCamelCase__ : Optional[ChannelDimension] = ChannelDimension.FIRST , **UpperCamelCase__ : Dict , ) -> PIL.Image.Image: """simple docstring""" __magic_name__ = do_resize if do_resize is not None else self.do_resize __magic_name__ = size if size is not None else self.size __magic_name__ = get_size_dict(UpperCamelCase__ , param_name="""size""" , default_to_square=UpperCamelCase__ ) __magic_name__ = resample if resample is not None else self.resample __magic_name__ = do_center_crop if do_center_crop is not None else self.do_center_crop __magic_name__ = crop_size if crop_size is not None else self.crop_size __magic_name__ = get_size_dict(UpperCamelCase__ , param_name="""crop_size""" , default_to_square=UpperCamelCase__ ) __magic_name__ = do_rescale if do_rescale is not None else self.do_rescale __magic_name__ = rescale_factor if rescale_factor is not None else self.rescale_factor __magic_name__ = do_normalize if do_normalize is not None else self.do_normalize __magic_name__ = image_mean if image_mean is not None else self.image_mean __magic_name__ = image_std if image_std is not None else self.image_std __magic_name__ = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb __magic_name__ = make_list_of_images(UpperCamelCase__ ) if not valid_images(UpperCamelCase__ ): raise ValueError( """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """ """torch.Tensor, tf.Tensor or jax.ndarray.""" ) if do_resize and size is None: raise ValueError("""Size must be specified if do_resize is True.""" ) if do_center_crop and crop_size is None: raise ValueError("""Crop size must be specified if do_center_crop is True.""" ) if do_rescale and rescale_factor is None: raise ValueError("""Rescale factor must be specified if do_rescale is True.""" ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("""Image mean and std must be specified if do_normalize is True.""" ) # PIL RGBA images are converted to RGB if do_convert_rgb: __magic_name__ = [convert_to_rgb(UpperCamelCase__ ) for image in images] # All transformations expect numpy arrays. __magic_name__ = [to_numpy_array(UpperCamelCase__ ) for image in images] if do_resize: __magic_name__ = [self.resize(image=UpperCamelCase__ , size=UpperCamelCase__ , resample=UpperCamelCase__ ) for image in images] if do_center_crop: __magic_name__ = [self.center_crop(image=UpperCamelCase__ , size=UpperCamelCase__ ) for image in images] if do_rescale: __magic_name__ = [self.rescale(image=UpperCamelCase__ , scale=UpperCamelCase__ ) for image in images] if do_normalize: __magic_name__ = [self.normalize(image=UpperCamelCase__ , mean=UpperCamelCase__ , std=UpperCamelCase__ ) for image in images] __magic_name__ = [to_channel_dimension_format(UpperCamelCase__ , UpperCamelCase__ ) for image in images] __magic_name__ = {"""pixel_values""": images} return BatchFeature(data=UpperCamelCase__ , tensor_type=UpperCamelCase__ )
76
0
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available __lowerCAmelCase : Union[str, Any] = {} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCAmelCase : str = ['BartphoTokenizer'] if TYPE_CHECKING: try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_bartpho import BartphoTokenizer else: import sys __lowerCAmelCase : str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
700
import unittest from transformers import AutoTokenizer, NystromformerConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( NystromformerForMaskedLM, NystromformerForMultipleChoice, NystromformerForQuestionAnswering, NystromformerForSequenceClassification, NystromformerForTokenClassification, NystromformerModel, ) from transformers.models.nystromformer.modeling_nystromformer import NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST class UpperCAmelCase_ : '''simple docstring''' def __init__( self : Any , UpperCamelCase__ : Any , UpperCamelCase__ : Tuple=13 , UpperCamelCase__ : Dict=7 , UpperCamelCase__ : Dict=True , UpperCamelCase__ : List[str]=True , UpperCamelCase__ : Optional[int]=True , UpperCamelCase__ : int=True , UpperCamelCase__ : Optional[int]=99 , UpperCamelCase__ : List[Any]=32 , UpperCamelCase__ : Any=5 , UpperCamelCase__ : List[Any]=4 , UpperCamelCase__ : str=37 , UpperCamelCase__ : Any="gelu" , UpperCamelCase__ : int=0.1 , UpperCamelCase__ : Tuple=0.1 , UpperCamelCase__ : Dict=512 , UpperCamelCase__ : Union[str, Any]=16 , UpperCamelCase__ : Dict=2 , UpperCamelCase__ : Any=0.02 , UpperCamelCase__ : Any=3 , UpperCamelCase__ : Dict=4 , UpperCamelCase__ : List[Any]=None , ) -> Union[str, Any]: """simple docstring""" __magic_name__ = parent __magic_name__ = batch_size __magic_name__ = seq_length __magic_name__ = is_training __magic_name__ = use_input_mask __magic_name__ = use_token_type_ids __magic_name__ = use_labels __magic_name__ = vocab_size __magic_name__ = hidden_size __magic_name__ = num_hidden_layers __magic_name__ = num_attention_heads __magic_name__ = intermediate_size __magic_name__ = hidden_act __magic_name__ = hidden_dropout_prob __magic_name__ = attention_probs_dropout_prob __magic_name__ = max_position_embeddings __magic_name__ = type_vocab_size __magic_name__ = type_sequence_label_size __magic_name__ = initializer_range __magic_name__ = num_labels __magic_name__ = num_choices __magic_name__ = scope def _lowercase ( self : Any ) -> Any: """simple docstring""" __magic_name__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __magic_name__ = None if self.use_input_mask: __magic_name__ = random_attention_mask([self.batch_size, self.seq_length] ) __magic_name__ = None if self.use_token_type_ids: __magic_name__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __magic_name__ = None __magic_name__ = None __magic_name__ = None if self.use_labels: __magic_name__ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __magic_name__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __magic_name__ = ids_tensor([self.batch_size] , self.num_choices ) __magic_name__ = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def _lowercase ( self : Tuple ) -> Any: """simple docstring""" return NystromformerConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCamelCase__ , initializer_range=self.initializer_range , ) def _lowercase ( self : Any , UpperCamelCase__ : Tuple , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : str , UpperCamelCase__ : int , UpperCamelCase__ : str , UpperCamelCase__ : Dict , UpperCamelCase__ : str ) -> Tuple: """simple docstring""" __magic_name__ = NystromformerModel(config=UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() __magic_name__ = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ ) __magic_name__ = model(UpperCamelCase__ , token_type_ids=UpperCamelCase__ ) __magic_name__ = model(UpperCamelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _lowercase ( self : Optional[int] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : str , UpperCamelCase__ : List[str] , UpperCamelCase__ : List[str] ) -> str: """simple docstring""" __magic_name__ = NystromformerForMaskedLM(config=UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() __magic_name__ = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , labels=UpperCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def _lowercase ( self : Tuple , UpperCamelCase__ : Any , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Dict , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Any , UpperCamelCase__ : Any ) -> Optional[Any]: """simple docstring""" __magic_name__ = NystromformerForQuestionAnswering(config=UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() __magic_name__ = model( UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , start_positions=UpperCamelCase__ , end_positions=UpperCamelCase__ , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def _lowercase ( self : Optional[Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : List[str] , UpperCamelCase__ : List[str] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Dict , UpperCamelCase__ : Dict , UpperCamelCase__ : Any ) -> Optional[int]: """simple docstring""" __magic_name__ = self.num_labels __magic_name__ = NystromformerForSequenceClassification(UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() __magic_name__ = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , labels=UpperCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def _lowercase ( self : Any , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : str , UpperCamelCase__ : Any ) -> Dict: """simple docstring""" __magic_name__ = self.num_labels __magic_name__ = NystromformerForTokenClassification(config=UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() __magic_name__ = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , labels=UpperCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def _lowercase ( self : Union[str, Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Tuple , UpperCamelCase__ : int , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : int , UpperCamelCase__ : int ) -> Optional[Any]: """simple docstring""" __magic_name__ = self.num_choices __magic_name__ = NystromformerForMultipleChoice(config=UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() __magic_name__ = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __magic_name__ = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __magic_name__ = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __magic_name__ = model( UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , labels=UpperCamelCase__ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def _lowercase ( self : int ) -> List[Any]: """simple docstring""" __magic_name__ = self.prepare_config_and_inputs() ( ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ) = config_and_inputs __magic_name__ = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask} return config, inputs_dict @require_torch class UpperCAmelCase_ ( _A , _A , unittest.TestCase ): '''simple docstring''' a__ = ( ( NystromformerModel, NystromformerForMaskedLM, NystromformerForMultipleChoice, NystromformerForQuestionAnswering, NystromformerForSequenceClassification, NystromformerForTokenClassification, ) if is_torch_available() else () ) a__ = ( { """feature-extraction""": NystromformerModel, """fill-mask""": NystromformerForMaskedLM, """question-answering""": NystromformerForQuestionAnswering, """text-classification""": NystromformerForSequenceClassification, """token-classification""": NystromformerForTokenClassification, """zero-shot""": NystromformerForSequenceClassification, } if is_torch_available() else {} ) a__ = False a__ = False def _lowercase ( self : Union[str, Any] ) -> Union[str, Any]: """simple docstring""" __magic_name__ = NystromformerModelTester(self ) __magic_name__ = ConfigTester(self , config_class=UpperCamelCase__ , hidden_size=37 ) def _lowercase ( self : Tuple ) -> Any: """simple docstring""" self.config_tester.run_common_tests() def _lowercase ( self : Optional[Any] ) -> Any: """simple docstring""" __magic_name__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCamelCase__ ) def _lowercase ( self : Optional[Any] ) -> int: """simple docstring""" __magic_name__ = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: __magic_name__ = type self.model_tester.create_and_check_model(*UpperCamelCase__ ) def _lowercase ( self : List[Any] ) -> List[Any]: """simple docstring""" __magic_name__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*UpperCamelCase__ ) def _lowercase ( self : Union[str, Any] ) -> str: """simple docstring""" __magic_name__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*UpperCamelCase__ ) def _lowercase ( self : Dict ) -> List[Any]: """simple docstring""" __magic_name__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*UpperCamelCase__ ) def _lowercase ( self : str ) -> int: """simple docstring""" __magic_name__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*UpperCamelCase__ ) def _lowercase ( self : List[Any] ) -> List[str]: """simple docstring""" __magic_name__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*UpperCamelCase__ ) @slow def _lowercase ( self : str ) -> Tuple: """simple docstring""" for model_name in NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __magic_name__ = NystromformerModel.from_pretrained(UpperCamelCase__ ) self.assertIsNotNone(UpperCamelCase__ ) @require_torch class UpperCAmelCase_ ( unittest.TestCase ): '''simple docstring''' @slow def _lowercase ( self : Union[str, Any] ) -> Optional[Any]: """simple docstring""" __magic_name__ = NystromformerModel.from_pretrained("""uw-madison/nystromformer-512""" ) __magic_name__ = torch.tensor([[0, 1, 2, 3, 4, 5]] ) with torch.no_grad(): __magic_name__ = model(UpperCamelCase__ )[0] __magic_name__ = torch.Size((1, 6, 768) ) self.assertEqual(output.shape , UpperCamelCase__ ) __magic_name__ = torch.tensor( [[[-0.4532, -0.0936, 0.5137], [-0.2676, 0.0628, 0.6186], [-0.3629, -0.1726, 0.4716]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , UpperCamelCase__ , atol=1E-4 ) ) @slow def _lowercase ( self : int ) -> str: """simple docstring""" __magic_name__ = """the [MASK] of Belgium is Brussels""" __magic_name__ = AutoTokenizer.from_pretrained("""uw-madison/nystromformer-512""" ) __magic_name__ = NystromformerForMaskedLM.from_pretrained("""uw-madison/nystromformer-512""" ) __magic_name__ = tokenizer(UpperCamelCase__ , return_tensors="""pt""" ) with torch.no_grad(): __magic_name__ = model(encoding.input_ids ).logits __magic_name__ = token_logits[:, 2, :].argmax(-1 )[0] self.assertEqual(tokenizer.decode(UpperCamelCase__ ) , """capital""" )
76
0
from math import factorial def a__ ( A_, A_ ): '''simple docstring''' if n < k or k < 0: raise ValueError("""Please enter positive integers for n and k where n >= k""" ) return factorial(_SCREAMING_SNAKE_CASE ) // (factorial(_SCREAMING_SNAKE_CASE ) * factorial(n - k )) if __name__ == "__main__": print( 'The number of five-card hands possible from a standard', F'''fifty-two card deck is: {combinations(52, 5)}\n''', ) print( 'If a class of 40 students must be arranged into groups of', F'''4 for group projects, there are {combinations(40, 4)} ways''', 'to arrange them.\n', ) print( 'If 10 teams are competing in a Formula One race, there', F'''are {combinations(10, 3)} ways that first, second and''', 'third place can be awarded.', )
701
from ...configuration_utils import PretrainedConfig from ...utils import logging __lowerCAmelCase : Tuple = logging.get_logger(__name__) __lowerCAmelCase : Union[str, Any] = { 'microsoft/cvt-13': 'https://huggingface.co/microsoft/cvt-13/resolve/main/config.json', # See all Cvt models at https://huggingface.co/models?filter=cvt } class UpperCAmelCase_ ( _A ): '''simple docstring''' a__ = """cvt""" def __init__( self : Dict , UpperCamelCase__ : Optional[int]=3 , UpperCamelCase__ : List[Any]=[7, 3, 3] , UpperCamelCase__ : Any=[4, 2, 2] , UpperCamelCase__ : Optional[Any]=[2, 1, 1] , UpperCamelCase__ : Union[str, Any]=[64, 192, 384] , UpperCamelCase__ : Dict=[1, 3, 6] , UpperCamelCase__ : Any=[1, 2, 10] , UpperCamelCase__ : List[str]=[4.0, 4.0, 4.0] , UpperCamelCase__ : Dict=[0.0, 0.0, 0.0] , UpperCamelCase__ : Tuple=[0.0, 0.0, 0.0] , UpperCamelCase__ : Optional[Any]=[0.0, 0.0, 0.1] , UpperCamelCase__ : str=[True, True, True] , UpperCamelCase__ : Optional[Any]=[False, False, True] , UpperCamelCase__ : Union[str, Any]=["dw_bn", "dw_bn", "dw_bn"] , UpperCamelCase__ : List[Any]=[3, 3, 3] , UpperCamelCase__ : Any=[1, 1, 1] , UpperCamelCase__ : Optional[int]=[2, 2, 2] , UpperCamelCase__ : Any=[1, 1, 1] , UpperCamelCase__ : List[str]=[1, 1, 1] , UpperCamelCase__ : int=0.02 , UpperCamelCase__ : int=1E-12 , **UpperCamelCase__ : int , ) -> Dict: """simple docstring""" super().__init__(**UpperCamelCase__ ) __magic_name__ = num_channels __magic_name__ = patch_sizes __magic_name__ = patch_stride __magic_name__ = patch_padding __magic_name__ = embed_dim __magic_name__ = num_heads __magic_name__ = depth __magic_name__ = mlp_ratio __magic_name__ = attention_drop_rate __magic_name__ = drop_rate __magic_name__ = drop_path_rate __magic_name__ = qkv_bias __magic_name__ = cls_token __magic_name__ = qkv_projection_method __magic_name__ = kernel_qkv __magic_name__ = padding_kv __magic_name__ = stride_kv __magic_name__ = padding_q __magic_name__ = stride_q __magic_name__ = initializer_range __magic_name__ = layer_norm_eps
76
0
import collections from typing import List, Optional, Union from ...tokenization_utils_base import BatchEncoding from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging from ..bert.tokenization_bert import BertTokenizer __lowerCAmelCase : Union[str, Any] = logging.get_logger(__name__) __lowerCAmelCase : Optional[int] = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'} __lowerCAmelCase : int = { 'vocab_file': { 'facebook/dpr-ctx_encoder-single-nq-base': ( 'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt' ), 'facebook/dpr-ctx_encoder-multiset-base': ( 'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt' ), }, 'tokenizer_file': { 'facebook/dpr-ctx_encoder-single-nq-base': ( 'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json' ), 'facebook/dpr-ctx_encoder-multiset-base': ( 'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json' ), }, } __lowerCAmelCase : int = { 'vocab_file': { 'facebook/dpr-question_encoder-single-nq-base': ( 'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt' ), 'facebook/dpr-question_encoder-multiset-base': ( 'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt' ), }, 'tokenizer_file': { 'facebook/dpr-question_encoder-single-nq-base': ( 'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json' ), 'facebook/dpr-question_encoder-multiset-base': ( 'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json' ), }, } __lowerCAmelCase : List[str] = { 'vocab_file': { 'facebook/dpr-reader-single-nq-base': ( 'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt' ), 'facebook/dpr-reader-multiset-base': ( 'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt' ), }, 'tokenizer_file': { 'facebook/dpr-reader-single-nq-base': ( 'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json' ), 'facebook/dpr-reader-multiset-base': ( 'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json' ), }, } __lowerCAmelCase : Optional[Any] = { 'facebook/dpr-ctx_encoder-single-nq-base': 512, 'facebook/dpr-ctx_encoder-multiset-base': 512, } __lowerCAmelCase : Optional[int] = { 'facebook/dpr-question_encoder-single-nq-base': 512, 'facebook/dpr-question_encoder-multiset-base': 512, } __lowerCAmelCase : Optional[Any] = { 'facebook/dpr-reader-single-nq-base': 512, 'facebook/dpr-reader-multiset-base': 512, } __lowerCAmelCase : str = { 'facebook/dpr-ctx_encoder-single-nq-base': {'do_lower_case': True}, 'facebook/dpr-ctx_encoder-multiset-base': {'do_lower_case': True}, } __lowerCAmelCase : str = { 'facebook/dpr-question_encoder-single-nq-base': {'do_lower_case': True}, 'facebook/dpr-question_encoder-multiset-base': {'do_lower_case': True}, } __lowerCAmelCase : List[Any] = { 'facebook/dpr-reader-single-nq-base': {'do_lower_case': True}, 'facebook/dpr-reader-multiset-base': {'do_lower_case': True}, } class UpperCAmelCase_ ( __snake_case ): '''simple docstring''' a__ = VOCAB_FILES_NAMES a__ = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP a__ = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES a__ = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION class UpperCAmelCase_ ( __snake_case ): '''simple docstring''' a__ = VOCAB_FILES_NAMES a__ = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP a__ = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES a__ = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION __lowerCAmelCase : List[str] = collections.namedtuple( 'DPRSpanPrediction', ['span_score', 'relevance_score', 'doc_id', 'start_index', 'end_index', 'text'] ) __lowerCAmelCase : str = collections.namedtuple('DPRReaderOutput', ['start_logits', 'end_logits', 'relevance_logits']) __lowerCAmelCase : Union[str, Any] = R'\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n ```\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n ```\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. Acceptable values are:\n\n - `\'tf\'`: Return TensorFlow `tf.constant` objects.\n - `\'pt\'`: Return PyTorch `torch.Tensor` objects.\n - `\'np\'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. If not set, will return the attention mask according to the\n specific tokenizer\'s default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Returns:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n ' @add_start_docstrings(__snake_case ) class UpperCAmelCase_ : '''simple docstring''' def __call__( self : Optional[Any] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Optional[Any] = None , UpperCamelCase__ : Tuple = None , UpperCamelCase__ : Union[str, Any] = False , UpperCamelCase__ : Union[str, Any] = False , UpperCamelCase__ : Union[str, Any] = None , UpperCamelCase__ : Dict = None , UpperCamelCase__ : Optional[int] = None , **UpperCamelCase__ : Any , ) -> Optional[int]: """simple docstring""" if titles is None and texts is None: return super().__call__( __UpperCamelCase , padding=__UpperCamelCase , truncation=__UpperCamelCase , max_length=__UpperCamelCase , return_tensors=__UpperCamelCase , return_attention_mask=__UpperCamelCase , **__UpperCamelCase , ) elif titles is None or texts is None: __magic_name__ = titles if texts is None else texts return super().__call__( __UpperCamelCase , __UpperCamelCase , padding=__UpperCamelCase , truncation=__UpperCamelCase , max_length=__UpperCamelCase , return_tensors=__UpperCamelCase , return_attention_mask=__UpperCamelCase , **__UpperCamelCase , ) __magic_name__ = titles if not isinstance(__UpperCamelCase , __UpperCamelCase ) else [titles] __magic_name__ = texts if not isinstance(__UpperCamelCase , __UpperCamelCase ) else [texts] __magic_name__ = len(__UpperCamelCase ) __magic_name__ = questions if not isinstance(__UpperCamelCase , __UpperCamelCase ) else [questions] * n_passages if len(__UpperCamelCase ) != len(__UpperCamelCase ): raise ValueError( F'''There should be as many titles than texts but got {len(__UpperCamelCase )} titles and {len(__UpperCamelCase )} texts.''' ) __magic_name__ = super().__call__(__UpperCamelCase , __UpperCamelCase , padding=__UpperCamelCase , truncation=__UpperCamelCase )["""input_ids"""] __magic_name__ = super().__call__(__UpperCamelCase , add_special_tokens=__UpperCamelCase , padding=__UpperCamelCase , truncation=__UpperCamelCase )["""input_ids"""] __magic_name__ = { """input_ids""": [ (encoded_question_and_title + encoded_text)[:max_length] if max_length is not None and truncation else encoded_question_and_title + encoded_text for encoded_question_and_title, encoded_text in zip(__UpperCamelCase , __UpperCamelCase ) ] } if return_attention_mask is not False: __magic_name__ = [] for input_ids in encoded_inputs["input_ids"]: attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] ) __magic_name__ = attention_mask return self.pad(__UpperCamelCase , padding=__UpperCamelCase , max_length=__UpperCamelCase , return_tensors=__UpperCamelCase ) def _lowercase ( self : List[Any] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : Optional[Any] = 16 , UpperCamelCase__ : List[Any] = 64 , UpperCamelCase__ : Any = 4 , ) -> Tuple: """simple docstring""" __magic_name__ = reader_input["""input_ids"""] __magic_name__ , __magic_name__ , __magic_name__ = reader_output[:3] __magic_name__ = len(__UpperCamelCase ) __magic_name__ = sorted(range(__UpperCamelCase ) , reverse=__UpperCamelCase , key=relevance_logits.__getitem__ ) __magic_name__ = [] for doc_id in sorted_docs: __magic_name__ = list(input_ids[doc_id] ) # assuming question & title information is at the beginning of the sequence __magic_name__ = sequence_ids.index(self.sep_token_id , 2 ) + 1 # second sep id if sequence_ids[-1] == self.pad_token_id: __magic_name__ = sequence_ids.index(self.pad_token_id ) else: __magic_name__ = len(__UpperCamelCase ) __magic_name__ = self._get_best_spans( start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=__UpperCamelCase , top_spans=__UpperCamelCase , ) for start_index, end_index in best_spans: start_index += passage_offset end_index += passage_offset nbest_spans_predictions.append( DPRSpanPrediction( span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=__UpperCamelCase , start_index=__UpperCamelCase , end_index=__UpperCamelCase , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) ) if len(__UpperCamelCase ) >= num_spans: break return nbest_spans_predictions[:num_spans] def _lowercase ( self : List[str] , UpperCamelCase__ : Dict , UpperCamelCase__ : List[Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Any , ) -> List[Any]: """simple docstring""" __magic_name__ = [] for start_index, start_score in enumerate(__UpperCamelCase ): for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ): scores.append(((start_index, start_index + answer_length), start_score + end_score) ) __magic_name__ = sorted(__UpperCamelCase , key=lambda UpperCamelCase__ : x[1] , reverse=__UpperCamelCase ) __magic_name__ = [] for (start_index, end_index), score in scores: if start_index > end_index: raise ValueError(F'''Wrong span indices: [{start_index}:{end_index}]''' ) __magic_name__ = end_index - start_index + 1 if length > max_answer_length: raise ValueError(F'''Span is too long: {length} > {max_answer_length}''' ) if any( start_index <= prev_start_index <= prev_end_index <= end_index or prev_start_index <= start_index <= end_index <= prev_end_index for (prev_start_index, prev_end_index) in chosen_span_intervals ): continue chosen_span_intervals.append((start_index, end_index) ) if len(__UpperCamelCase ) == top_spans: break return chosen_span_intervals @add_end_docstrings(__snake_case ) class UpperCAmelCase_ ( __snake_case , __snake_case ): '''simple docstring''' a__ = VOCAB_FILES_NAMES a__ = READER_PRETRAINED_VOCAB_FILES_MAP a__ = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES a__ = READER_PRETRAINED_INIT_CONFIGURATION a__ = ["""input_ids""", """attention_mask"""]
702
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available __lowerCAmelCase : List[str] = { 'configuration_canine': ['CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'CanineConfig'], 'tokenization_canine': ['CanineTokenizer'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCAmelCase : Optional[Any] = [ 'CANINE_PRETRAINED_MODEL_ARCHIVE_LIST', 'CanineForMultipleChoice', 'CanineForQuestionAnswering', 'CanineForSequenceClassification', 'CanineForTokenClassification', 'CanineLayer', 'CanineModel', 'CaninePreTrainedModel', 'load_tf_weights_in_canine', ] if TYPE_CHECKING: from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig from .tokenization_canine import CanineTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_canine import ( CANINE_PRETRAINED_MODEL_ARCHIVE_LIST, CanineForMultipleChoice, CanineForQuestionAnswering, CanineForSequenceClassification, CanineForTokenClassification, CanineLayer, CanineModel, CaninePreTrainedModel, load_tf_weights_in_canine, ) else: import sys __lowerCAmelCase : int = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
76
0
def a__ ( A_ = 1000000 ): '''simple docstring''' __magic_name__ = 1 __magic_name__ = 1 __magic_name__ = {1: 1} for inputa in range(2, lowerCAmelCase_ ): __magic_name__ = 0 __magic_name__ = inputa while True: if number in counters: counter += counters[number] break if number % 2 == 0: number //= 2 counter += 1 else: __magic_name__ = (3 * number) + 1 counter += 1 if inputa not in counters: __magic_name__ = counter if counter > pre_counter: __magic_name__ = inputa __magic_name__ = counter return largest_number if __name__ == "__main__": print(solution(int(input().strip())))
703
import argparse import torch from transformers import ( WavaVecaConfig, WavaVecaFeatureExtractor, WavaVecaForAudioFrameClassification, WavaVecaForSequenceClassification, WavaVecaForXVector, logging, ) logging.set_verbosity_info() __lowerCAmelCase : str = logging.get_logger(__name__) def a__ ( A_, A_, A_ ): '''simple docstring''' __magic_name__ = WavaVecaForSequenceClassification.from_pretrained(A_, config=A_ ) __magic_name__ = downstream_dict["""projector.weight"""] __magic_name__ = downstream_dict["""projector.bias"""] __magic_name__ = downstream_dict["""model.post_net.linear.weight"""] __magic_name__ = downstream_dict["""model.post_net.linear.bias"""] return model def a__ ( A_, A_, A_ ): '''simple docstring''' __magic_name__ = WavaVecaForAudioFrameClassification.from_pretrained(A_, config=A_ ) __magic_name__ = downstream_dict["""model.linear.weight"""] __magic_name__ = downstream_dict["""model.linear.bias"""] return model def a__ ( A_, A_, A_ ): '''simple docstring''' __magic_name__ = WavaVecaForXVector.from_pretrained(A_, config=A_ ) __magic_name__ = downstream_dict["""connector.weight"""] __magic_name__ = downstream_dict["""connector.bias"""] for i, kernel_size in enumerate(hf_config.tdnn_kernel ): __magic_name__ = downstream_dict[ f'''model.framelevel_feature_extractor.module.{i}.kernel.weight''' ] __magic_name__ = downstream_dict[f'''model.framelevel_feature_extractor.module.{i}.kernel.bias'''] __magic_name__ = downstream_dict["""model.utterancelevel_feature_extractor.linear1.weight"""] __magic_name__ = downstream_dict["""model.utterancelevel_feature_extractor.linear1.bias"""] __magic_name__ = downstream_dict["""model.utterancelevel_feature_extractor.linear2.weight"""] __magic_name__ = downstream_dict["""model.utterancelevel_feature_extractor.linear2.bias"""] __magic_name__ = downstream_dict["""objective.W"""] return model @torch.no_grad() def a__ ( A_, A_, A_, A_ ): '''simple docstring''' __magic_name__ = torch.load(A_, map_location="""cpu""" ) __magic_name__ = checkpoint["""Downstream"""] __magic_name__ = WavaVecaConfig.from_pretrained(A_ ) __magic_name__ = WavaVecaFeatureExtractor.from_pretrained( A_, return_attention_mask=A_, do_normalize=A_ ) __magic_name__ = hf_config.architectures[0] if arch.endswith("""ForSequenceClassification""" ): __magic_name__ = convert_classification(A_, A_, A_ ) elif arch.endswith("""ForAudioFrameClassification""" ): __magic_name__ = convert_diarization(A_, A_, A_ ) elif arch.endswith("""ForXVector""" ): __magic_name__ = convert_xvector(A_, A_, A_ ) else: raise NotImplementedError(f'''S3PRL weights conversion is not supported for {arch}''' ) if hf_config.use_weighted_layer_sum: __magic_name__ = checkpoint["""Featurizer"""]["""weights"""] hf_feature_extractor.save_pretrained(A_ ) hf_model.save_pretrained(A_ ) if __name__ == "__main__": __lowerCAmelCase : Optional[Any] = argparse.ArgumentParser() parser.add_argument( '--base_model_name', default=None, type=str, help='Name of the huggingface pretrained base model.' ) parser.add_argument('--config_path', default=None, type=str, help='Path to the huggingface classifier config.') parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to the s3prl checkpoint.') parser.add_argument('--model_dump_path', default=None, type=str, help='Path to the final converted model.') __lowerCAmelCase : str = parser.parse_args() convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
76
0
from typing import Optional, Tuple, Union import flax import flax.linen as nn import jax import jax.numpy as jnp from flax.core.frozen_dict import FrozenDict from ..configuration_utils import ConfigMixin, flax_register_to_config from ..utils import BaseOutput from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps from .modeling_flax_utils import FlaxModelMixin from .unet_ad_blocks_flax import ( FlaxCrossAttnDownBlockaD, FlaxCrossAttnUpBlockaD, FlaxDownBlockaD, FlaxUNetMidBlockaDCrossAttn, FlaxUpBlockaD, ) @flax.struct.dataclass class UpperCAmelCase_ ( _UpperCAmelCase ): '''simple docstring''' a__ = 42 @flax_register_to_config class UpperCAmelCase_ ( nn.Module , _UpperCAmelCase , _UpperCAmelCase ): '''simple docstring''' a__ = 32 a__ = 4 a__ = 4 a__ = ( "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "DownBlock2D", ) a__ = ("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D") a__ = False a__ = (3_20, 6_40, 12_80, 12_80) a__ = 2 a__ = 8 a__ = None a__ = 12_80 a__ = 0.0 a__ = False a__ = jnp.floataa a__ = True a__ = 0 a__ = False def _lowercase ( self : Union[str, Any] , UpperCamelCase__ : Any ) -> Any: """simple docstring""" __magic_name__ = (1, self.in_channels, self.sample_size, self.sample_size) __magic_name__ = jnp.zeros(lowercase__ , dtype=jnp.floataa ) __magic_name__ = jnp.ones((1,) , dtype=jnp.intaa ) __magic_name__ = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.floataa ) __magic_name__ = jax.random.split(lowercase__ ) __magic_name__ = {"""params""": params_rng, """dropout""": dropout_rng} return self.init(lowercase__ , lowercase__ , lowercase__ , lowercase__ )["params"] def _lowercase ( self : Union[str, Any] ) -> Tuple: """simple docstring""" __magic_name__ = self.block_out_channels __magic_name__ = block_out_channels[0] * 4 if self.num_attention_heads is not None: raise ValueError( """At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19.""" ) # If `num_attention_heads` is not defined (which is the case for most models) # it will default to `attention_head_dim`. This looks weird upon first reading it and it is. # The reason for this behavior is to correct for incorrectly named variables that were introduced # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131 # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking # which is why we correct for the naming here. __magic_name__ = self.num_attention_heads or self.attention_head_dim # input __magic_name__ = nn.Conv( block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) # time __magic_name__ = FlaxTimesteps( block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift ) __magic_name__ = FlaxTimestepEmbedding(lowercase__ , dtype=self.dtype ) __magic_name__ = self.only_cross_attention if isinstance(lowercase__ , lowercase__ ): __magic_name__ = (only_cross_attention,) * len(self.down_block_types ) if isinstance(lowercase__ , lowercase__ ): __magic_name__ = (num_attention_heads,) * len(self.down_block_types ) # down __magic_name__ = [] __magic_name__ = block_out_channels[0] for i, down_block_type in enumerate(self.down_block_types ): __magic_name__ = output_channel __magic_name__ = block_out_channels[i] __magic_name__ = i == len(lowercase__ ) - 1 if down_block_type == "CrossAttnDownBlock2D": __magic_name__ = FlaxCrossAttnDownBlockaD( in_channels=lowercase__ , out_channels=lowercase__ , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) else: __magic_name__ = FlaxDownBlockaD( in_channels=lowercase__ , out_channels=lowercase__ , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , ) down_blocks.append(lowercase__ ) __magic_name__ = down_blocks # mid __magic_name__ = FlaxUNetMidBlockaDCrossAttn( in_channels=block_out_channels[-1] , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) # up __magic_name__ = [] __magic_name__ = list(reversed(lowercase__ ) ) __magic_name__ = list(reversed(lowercase__ ) ) __magic_name__ = list(reversed(lowercase__ ) ) __magic_name__ = reversed_block_out_channels[0] for i, up_block_type in enumerate(self.up_block_types ): __magic_name__ = output_channel __magic_name__ = reversed_block_out_channels[i] __magic_name__ = reversed_block_out_channels[min(i + 1 , len(lowercase__ ) - 1 )] __magic_name__ = i == len(lowercase__ ) - 1 if up_block_type == "CrossAttnUpBlock2D": __magic_name__ = FlaxCrossAttnUpBlockaD( in_channels=lowercase__ , out_channels=lowercase__ , prev_output_channel=lowercase__ , num_layers=self.layers_per_block + 1 , num_attention_heads=reversed_num_attention_heads[i] , add_upsample=not is_final_block , dropout=self.dropout , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) else: __magic_name__ = FlaxUpBlockaD( in_channels=lowercase__ , out_channels=lowercase__ , prev_output_channel=lowercase__ , num_layers=self.layers_per_block + 1 , add_upsample=not is_final_block , dropout=self.dropout , dtype=self.dtype , ) up_blocks.append(lowercase__ ) __magic_name__ = output_channel __magic_name__ = up_blocks # out __magic_name__ = nn.GroupNorm(num_groups=32 , epsilon=1E-5 ) __magic_name__ = nn.Conv( self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) def __call__( self : Optional[int] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Optional[Any]=None , UpperCamelCase__ : List[str]=None , UpperCamelCase__ : Tuple = True , UpperCamelCase__ : int = False , ) -> Tuple: """simple docstring""" if not isinstance(lowercase__ , jnp.ndarray ): __magic_name__ = jnp.array([timesteps] , dtype=jnp.intaa ) elif isinstance(lowercase__ , jnp.ndarray ) and len(timesteps.shape ) == 0: __magic_name__ = timesteps.astype(dtype=jnp.floataa ) __magic_name__ = jnp.expand_dims(lowercase__ , 0 ) __magic_name__ = self.time_proj(lowercase__ ) __magic_name__ = self.time_embedding(lowercase__ ) # 2. pre-process __magic_name__ = jnp.transpose(lowercase__ , (0, 2, 3, 1) ) __magic_name__ = self.conv_in(lowercase__ ) # 3. down __magic_name__ = (sample,) for down_block in self.down_blocks: if isinstance(lowercase__ , lowercase__ ): __magic_name__ = down_block(lowercase__ , lowercase__ , lowercase__ , deterministic=not train ) else: __magic_name__ = down_block(lowercase__ , lowercase__ , deterministic=not train ) down_block_res_samples += res_samples if down_block_additional_residuals is not None: __magic_name__ = () for down_block_res_sample, down_block_additional_residual in zip( lowercase__ , lowercase__ ): down_block_res_sample += down_block_additional_residual new_down_block_res_samples += (down_block_res_sample,) __magic_name__ = new_down_block_res_samples # 4. mid __magic_name__ = self.mid_block(lowercase__ , lowercase__ , lowercase__ , deterministic=not train ) if mid_block_additional_residual is not None: sample += mid_block_additional_residual # 5. up for up_block in self.up_blocks: __magic_name__ = down_block_res_samples[-(self.layers_per_block + 1) :] __magic_name__ = down_block_res_samples[: -(self.layers_per_block + 1)] if isinstance(lowercase__ , lowercase__ ): __magic_name__ = up_block( lowercase__ , temb=lowercase__ , encoder_hidden_states=lowercase__ , res_hidden_states_tuple=lowercase__ , deterministic=not train , ) else: __magic_name__ = up_block(lowercase__ , temb=lowercase__ , res_hidden_states_tuple=lowercase__ , deterministic=not train ) # 6. post-process __magic_name__ = self.conv_norm_out(lowercase__ ) __magic_name__ = nn.silu(lowercase__ ) __magic_name__ = self.conv_out(lowercase__ ) __magic_name__ = jnp.transpose(lowercase__ , (0, 3, 1, 2) ) if not return_dict: return (sample,) return FlaxUNetaDConditionOutput(sample=lowercase__ )
704
import pytest from datasets import Dataset, DatasetDict, Features, NamedSplit, Value from datasets.io.text import TextDatasetReader from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases def a__ ( A_, A_ ): '''simple docstring''' assert isinstance(A_, A_ ) assert dataset.num_rows == 4 assert dataset.num_columns == 1 assert dataset.column_names == ["text"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("""keep_in_memory""", [False, True] ) def a__ ( A_, A_, A_ ): '''simple docstring''' __magic_name__ = tmp_path / """cache""" __magic_name__ = {"""text""": """string"""} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): __magic_name__ = TextDatasetReader(A_, cache_dir=A_, keep_in_memory=A_ ).read() _check_text_dataset(A_, A_ ) @pytest.mark.parametrize( """features""", [ None, {"""text""": """string"""}, {"""text""": """int32"""}, {"""text""": """float32"""}, ], ) def a__ ( A_, A_, A_ ): '''simple docstring''' __magic_name__ = tmp_path / """cache""" __magic_name__ = {"""text""": """string"""} __magic_name__ = features.copy() if features else default_expected_features __magic_name__ = ( Features({feature: Value(A_ ) for feature, dtype in features.items()} ) if features is not None else None ) __magic_name__ = TextDatasetReader(A_, features=A_, cache_dir=A_ ).read() _check_text_dataset(A_, A_ ) @pytest.mark.parametrize("""split""", [None, NamedSplit("""train""" ), """train""", """test"""] ) def a__ ( A_, A_, A_ ): '''simple docstring''' __magic_name__ = tmp_path / """cache""" __magic_name__ = {"""text""": """string"""} __magic_name__ = TextDatasetReader(A_, cache_dir=A_, split=A_ ).read() _check_text_dataset(A_, A_ ) assert dataset.split == split if split else "train" @pytest.mark.parametrize("""path_type""", [str, list] ) def a__ ( A_, A_, A_ ): '''simple docstring''' if issubclass(A_, A_ ): __magic_name__ = text_path elif issubclass(A_, A_ ): __magic_name__ = [text_path] __magic_name__ = tmp_path / """cache""" __magic_name__ = {"""text""": """string"""} __magic_name__ = TextDatasetReader(A_, cache_dir=A_ ).read() _check_text_dataset(A_, A_ ) def a__ ( A_, A_, A_=("train",) ): '''simple docstring''' assert isinstance(A_, A_ ) for split in splits: __magic_name__ = dataset_dict[split] assert dataset.num_rows == 4 assert dataset.num_columns == 1 assert dataset.column_names == ["text"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("""keep_in_memory""", [False, True] ) def a__ ( A_, A_, A_ ): '''simple docstring''' __magic_name__ = tmp_path / """cache""" __magic_name__ = {"""text""": """string"""} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): __magic_name__ = TextDatasetReader({"""train""": text_path}, cache_dir=A_, keep_in_memory=A_ ).read() _check_text_datasetdict(A_, A_ ) @pytest.mark.parametrize( """features""", [ None, {"""text""": """string"""}, {"""text""": """int32"""}, {"""text""": """float32"""}, ], ) def a__ ( A_, A_, A_ ): '''simple docstring''' __magic_name__ = tmp_path / """cache""" # CSV file loses col_1 string dtype information: default now is "int64" instead of "string" __magic_name__ = {"""text""": """string"""} __magic_name__ = features.copy() if features else default_expected_features __magic_name__ = ( Features({feature: Value(A_ ) for feature, dtype in features.items()} ) if features is not None else None ) __magic_name__ = TextDatasetReader({"""train""": text_path}, features=A_, cache_dir=A_ ).read() _check_text_datasetdict(A_, A_ ) @pytest.mark.parametrize("""split""", [None, NamedSplit("""train""" ), """train""", """test"""] ) def a__ ( A_, A_, A_ ): '''simple docstring''' if split: __magic_name__ = {split: text_path} else: __magic_name__ = """train""" __magic_name__ = {"""train""": text_path, """test""": text_path} __magic_name__ = tmp_path / """cache""" __magic_name__ = {"""text""": """string"""} __magic_name__ = TextDatasetReader(A_, cache_dir=A_ ).read() _check_text_datasetdict(A_, A_, splits=list(path.keys() ) ) assert all(dataset[split].split == split for split in path.keys() )
76
0
from packaging import version from .import_utils import is_accelerate_available if is_accelerate_available(): import accelerate def a__ ( A_ ): '''simple docstring''' if not is_accelerate_available(): return method __magic_name__ = version.parse(accelerate.__version__ ).base_version if version.parse(a_ ) < version.parse("""0.17.0""" ): return method def wrapper(self, *A_, **A_ ): if hasattr(self, """_hf_hook""" ) and hasattr(self._hf_hook, """pre_forward""" ): self._hf_hook.pre_forward(self ) return method(self, *a_, **a_ ) return wrapper
705
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, logging __lowerCAmelCase : Union[str, Any] = logging.get_logger(__name__) class UpperCAmelCase_ ( _A ): '''simple docstring''' a__ = ["""pixel_values"""] def __init__( self : str , UpperCamelCase__ : bool = True , UpperCamelCase__ : Optional[Dict[str, int]] = None , UpperCamelCase__ : PILImageResampling = PILImageResampling.BILINEAR , UpperCamelCase__ : bool = True , UpperCamelCase__ : Dict[str, int] = None , UpperCamelCase__ : bool = True , UpperCamelCase__ : Union[int, float] = 1 / 255 , UpperCamelCase__ : bool = True , UpperCamelCase__ : Optional[Union[float, List[float]]] = None , UpperCamelCase__ : Optional[Union[float, List[float]]] = None , **UpperCamelCase__ : List[Any] , ) -> None: """simple docstring""" super().__init__(**UpperCamelCase__ ) __magic_name__ = size if size is not None else {"""shortest_edge""": 256} __magic_name__ = get_size_dict(UpperCamelCase__ , default_to_square=UpperCamelCase__ ) __magic_name__ = crop_size if crop_size is not None else {"""height""": 224, """width""": 224} __magic_name__ = get_size_dict(UpperCamelCase__ ) __magic_name__ = do_resize __magic_name__ = size __magic_name__ = resample __magic_name__ = do_center_crop __magic_name__ = crop_size __magic_name__ = do_rescale __magic_name__ = rescale_factor __magic_name__ = do_normalize __magic_name__ = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN __magic_name__ = image_std if image_std is not None else IMAGENET_STANDARD_STD def _lowercase ( self : Any , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : Dict[str, int] , UpperCamelCase__ : PILImageResampling = PILImageResampling.BICUBIC , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : Tuple , ) -> np.ndarray: """simple docstring""" __magic_name__ = get_size_dict(UpperCamelCase__ , default_to_square=UpperCamelCase__ ) if "shortest_edge" not in size: raise ValueError(F'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''' ) __magic_name__ = get_resize_output_image_size(UpperCamelCase__ , size=size["""shortest_edge"""] , default_to_square=UpperCamelCase__ ) return resize(UpperCamelCase__ , size=UpperCamelCase__ , resample=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__ ) def _lowercase ( self : str , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : Dict[str, int] , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : str , ) -> np.ndarray: """simple docstring""" __magic_name__ = get_size_dict(UpperCamelCase__ ) return center_crop(UpperCamelCase__ , size=(size["""height"""], size["""width"""]) , data_format=UpperCamelCase__ , **UpperCamelCase__ ) def _lowercase ( self : Tuple , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : float , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : Any ) -> np.ndarray: """simple docstring""" return rescale(UpperCamelCase__ , scale=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__ ) def _lowercase ( self : List[str] , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : Union[float, List[float]] , UpperCamelCase__ : Union[float, List[float]] , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : str , ) -> np.ndarray: """simple docstring""" return normalize(UpperCamelCase__ , mean=UpperCamelCase__ , std=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__ ) def _lowercase ( self : Optional[Any] , UpperCamelCase__ : ImageInput , UpperCamelCase__ : Optional[bool] = None , UpperCamelCase__ : Dict[str, int] = None , UpperCamelCase__ : PILImageResampling = None , UpperCamelCase__ : bool = None , UpperCamelCase__ : Dict[str, int] = None , UpperCamelCase__ : Optional[bool] = None , UpperCamelCase__ : Optional[float] = None , UpperCamelCase__ : Optional[bool] = None , UpperCamelCase__ : Optional[Union[float, List[float]]] = None , UpperCamelCase__ : Optional[Union[float, List[float]]] = None , UpperCamelCase__ : Optional[Union[str, TensorType]] = None , UpperCamelCase__ : Union[str, ChannelDimension] = ChannelDimension.FIRST , **UpperCamelCase__ : int , ) -> Dict: """simple docstring""" __magic_name__ = do_resize if do_resize is not None else self.do_resize __magic_name__ = size if size is not None else self.size __magic_name__ = get_size_dict(UpperCamelCase__ , default_to_square=UpperCamelCase__ ) __magic_name__ = resample if resample is not None else self.resample __magic_name__ = do_center_crop if do_center_crop is not None else self.do_center_crop __magic_name__ = crop_size if crop_size is not None else self.crop_size __magic_name__ = get_size_dict(UpperCamelCase__ ) __magic_name__ = do_rescale if do_rescale is not None else self.do_rescale __magic_name__ = rescale_factor if rescale_factor is not None else self.rescale_factor __magic_name__ = do_normalize if do_normalize is not None else self.do_normalize __magic_name__ = image_mean if image_mean is not None else self.image_mean __magic_name__ = image_std if image_std is not None else self.image_std __magic_name__ = make_list_of_images(UpperCamelCase__ ) if not valid_images(UpperCamelCase__ ): raise ValueError( """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """ """torch.Tensor, tf.Tensor or jax.ndarray.""" ) if do_resize and size is None: raise ValueError("""Size must be specified if do_resize is True.""" ) if do_center_crop and crop_size is None: raise ValueError("""Crop size must be specified if do_center_crop is True.""" ) if do_rescale and rescale_factor is None: raise ValueError("""Rescale factor must be specified if do_rescale is True.""" ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("""Image mean and std must be specified if do_normalize is True.""" ) # All transformations expect numpy arrays. __magic_name__ = [to_numpy_array(UpperCamelCase__ ) for image in images] if do_resize: __magic_name__ = [self.resize(image=UpperCamelCase__ , size=UpperCamelCase__ , resample=UpperCamelCase__ ) for image in images] if do_center_crop: __magic_name__ = [self.center_crop(image=UpperCamelCase__ , size=UpperCamelCase__ ) for image in images] if do_rescale: __magic_name__ = [self.rescale(image=UpperCamelCase__ , scale=UpperCamelCase__ ) for image in images] if do_normalize: __magic_name__ = [self.normalize(image=UpperCamelCase__ , mean=UpperCamelCase__ , std=UpperCamelCase__ ) for image in images] __magic_name__ = [to_channel_dimension_format(UpperCamelCase__ , UpperCamelCase__ ) for image in images] __magic_name__ = {"""pixel_values""": images} return BatchFeature(data=UpperCamelCase__ , tensor_type=UpperCamelCase__ )
76
0
import gc import random import tempfile import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, ControlNetModel, DDIMScheduler, StableDiffusionControlNetImgaImgPipeline, UNetaDConditionModel, ) from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import ( IMAGE_TO_IMAGE_IMAGE_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, ) from ..test_pipelines_common import ( PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, ) enable_full_determinism() class UpperCAmelCase_ ( _A , _A , _A , unittest.TestCase ): a__ = StableDiffusionControlNetImgaImgPipeline a__ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"} a__ = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS a__ = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({"""control_image"""} ) a__ = IMAGE_TO_IMAGE_IMAGE_PARAMS def _lowercase ( self : Any ) -> Tuple: """simple docstring""" torch.manual_seed(0 ) __magic_name__ = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , ) torch.manual_seed(0 ) __magic_name__ = ControlNetModel( block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , ) torch.manual_seed(0 ) __magic_name__ = DDIMScheduler( beta_start=0.00085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=UpperCamelCase__ , set_alpha_to_one=UpperCamelCase__ , ) torch.manual_seed(0 ) __magic_name__ = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , ) torch.manual_seed(0 ) __magic_name__ = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) __magic_name__ = CLIPTextModel(UpperCamelCase__ ) __magic_name__ = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) __magic_name__ = { """unet""": unet, """controlnet""": controlnet, """scheduler""": scheduler, """vae""": vae, """text_encoder""": text_encoder, """tokenizer""": tokenizer, """safety_checker""": None, """feature_extractor""": None, } return components def _lowercase ( self : Any , UpperCamelCase__ : int , UpperCamelCase__ : List[str]=0 ) -> Any: """simple docstring""" if str(UpperCamelCase__ ).startswith("""mps""" ): __magic_name__ = torch.manual_seed(UpperCamelCase__ ) else: __magic_name__ = torch.Generator(device=UpperCamelCase__ ).manual_seed(UpperCamelCase__ ) __magic_name__ = 2 __magic_name__ = randn_tensor( (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=UpperCamelCase__ , device=torch.device(UpperCamelCase__ ) , ) __magic_name__ = floats_tensor(control_image.shape , rng=random.Random(UpperCamelCase__ ) ).to(UpperCamelCase__ ) __magic_name__ = image.cpu().permute(0 , 2 , 3 , 1 )[0] __magic_name__ = Image.fromarray(np.uinta(UpperCamelCase__ ) ).convert("""RGB""" ).resize((64, 64) ) __magic_name__ = { """prompt""": """A painting of a squirrel eating a burger""", """generator""": generator, """num_inference_steps""": 2, """guidance_scale""": 6.0, """output_type""": """numpy""", """image""": image, """control_image""": control_image, } return inputs def _lowercase ( self : List[str] ) -> int: """simple docstring""" return self._test_attention_slicing_forward_pass(expected_max_diff=2E-3 ) @unittest.skipIf( torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , ) def _lowercase ( self : Dict ) -> str: """simple docstring""" self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2E-3 ) def _lowercase ( self : Optional[int] ) -> Optional[int]: """simple docstring""" self._test_inference_batch_single_identical(expected_max_diff=2E-3 ) class UpperCAmelCase_ ( _A , _A , unittest.TestCase ): a__ = StableDiffusionControlNetImgaImgPipeline a__ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"} a__ = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS a__ = frozenset([] ) # TO_DO: add image_params once refactored VaeImageProcessor.preprocess def _lowercase ( self : Dict ) -> List[Any]: """simple docstring""" torch.manual_seed(0 ) __magic_name__ = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , ) torch.manual_seed(0 ) def init_weights(UpperCamelCase__ : Tuple ): if isinstance(UpperCamelCase__ , torch.nn.Convad ): torch.nn.init.normal(m.weight ) m.bias.data.fill_(1.0 ) __magic_name__ = ControlNetModel( block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , ) controlneta.controlnet_down_blocks.apply(UpperCamelCase__ ) torch.manual_seed(0 ) __magic_name__ = ControlNetModel( block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , ) controlneta.controlnet_down_blocks.apply(UpperCamelCase__ ) torch.manual_seed(0 ) __magic_name__ = DDIMScheduler( beta_start=0.00085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=UpperCamelCase__ , set_alpha_to_one=UpperCamelCase__ , ) torch.manual_seed(0 ) __magic_name__ = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , ) torch.manual_seed(0 ) __magic_name__ = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) __magic_name__ = CLIPTextModel(UpperCamelCase__ ) __magic_name__ = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) __magic_name__ = MultiControlNetModel([controlneta, controlneta] ) __magic_name__ = { """unet""": unet, """controlnet""": controlnet, """scheduler""": scheduler, """vae""": vae, """text_encoder""": text_encoder, """tokenizer""": tokenizer, """safety_checker""": None, """feature_extractor""": None, } return components def _lowercase ( self : List[str] , UpperCamelCase__ : int , UpperCamelCase__ : Union[str, Any]=0 ) -> List[str]: """simple docstring""" if str(UpperCamelCase__ ).startswith("""mps""" ): __magic_name__ = torch.manual_seed(UpperCamelCase__ ) else: __magic_name__ = torch.Generator(device=UpperCamelCase__ ).manual_seed(UpperCamelCase__ ) __magic_name__ = 2 __magic_name__ = [ randn_tensor( (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=UpperCamelCase__ , device=torch.device(UpperCamelCase__ ) , ), randn_tensor( (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=UpperCamelCase__ , device=torch.device(UpperCamelCase__ ) , ), ] __magic_name__ = floats_tensor(control_image[0].shape , rng=random.Random(UpperCamelCase__ ) ).to(UpperCamelCase__ ) __magic_name__ = image.cpu().permute(0 , 2 , 3 , 1 )[0] __magic_name__ = Image.fromarray(np.uinta(UpperCamelCase__ ) ).convert("""RGB""" ).resize((64, 64) ) __magic_name__ = { """prompt""": """A painting of a squirrel eating a burger""", """generator""": generator, """num_inference_steps""": 2, """guidance_scale""": 6.0, """output_type""": """numpy""", """image""": image, """control_image""": control_image, } return inputs def _lowercase ( self : List[Any] ) -> Optional[Any]: """simple docstring""" __magic_name__ = self.get_dummy_components() __magic_name__ = self.pipeline_class(**UpperCamelCase__ ) pipe.to(UpperCamelCase__ ) __magic_name__ = 10.0 __magic_name__ = 4 __magic_name__ = self.get_dummy_inputs(UpperCamelCase__ ) __magic_name__ = steps __magic_name__ = scale __magic_name__ = pipe(**UpperCamelCase__ )[0] __magic_name__ = self.get_dummy_inputs(UpperCamelCase__ ) __magic_name__ = steps __magic_name__ = scale __magic_name__ = pipe(**UpperCamelCase__ , control_guidance_start=0.1 , control_guidance_end=0.2 )[0] __magic_name__ = self.get_dummy_inputs(UpperCamelCase__ ) __magic_name__ = steps __magic_name__ = scale __magic_name__ = pipe(**UpperCamelCase__ , control_guidance_start=[0.1, 0.3] , control_guidance_end=[0.2, 0.7] )[0] __magic_name__ = self.get_dummy_inputs(UpperCamelCase__ ) __magic_name__ = steps __magic_name__ = scale __magic_name__ = pipe(**UpperCamelCase__ , control_guidance_start=0.4 , control_guidance_end=[0.5, 0.8] )[0] # make sure that all outputs are different assert np.sum(np.abs(output_a - output_a ) ) > 1E-3 assert np.sum(np.abs(output_a - output_a ) ) > 1E-3 assert np.sum(np.abs(output_a - output_a ) ) > 1E-3 def _lowercase ( self : str ) -> List[str]: """simple docstring""" return self._test_attention_slicing_forward_pass(expected_max_diff=2E-3 ) @unittest.skipIf( torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , ) def _lowercase ( self : Tuple ) -> Tuple: """simple docstring""" self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2E-3 ) def _lowercase ( self : int ) -> Optional[int]: """simple docstring""" self._test_inference_batch_single_identical(expected_max_diff=2E-3 ) def _lowercase ( self : int ) -> Optional[int]: """simple docstring""" __magic_name__ = self.get_dummy_components() __magic_name__ = self.pipeline_class(**UpperCamelCase__ ) pipe.to(UpperCamelCase__ ) pipe.set_progress_bar_config(disable=UpperCamelCase__ ) with tempfile.TemporaryDirectory() as tmpdir: try: # save_pretrained is not implemented for Multi-ControlNet pipe.save_pretrained(UpperCamelCase__ ) except NotImplementedError: pass @slow @require_torch_gpu class UpperCAmelCase_ ( unittest.TestCase ): def _lowercase ( self : Optional[Any] ) -> int: """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def _lowercase ( self : Optional[Any] ) -> List[Any]: """simple docstring""" __magic_name__ = ControlNetModel.from_pretrained("""lllyasviel/sd-controlnet-canny""" ) __magic_name__ = StableDiffusionControlNetImgaImgPipeline.from_pretrained( """runwayml/stable-diffusion-v1-5""" , safety_checker=UpperCamelCase__ , controlnet=UpperCamelCase__ ) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=UpperCamelCase__ ) __magic_name__ = torch.Generator(device="""cpu""" ).manual_seed(0 ) __magic_name__ = """evil space-punk bird""" __magic_name__ = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png""" ).resize((512, 512) ) __magic_name__ = load_image( """https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png""" ).resize((512, 512) ) __magic_name__ = pipe( UpperCamelCase__ , UpperCamelCase__ , control_image=UpperCamelCase__ , generator=UpperCamelCase__ , output_type="""np""" , num_inference_steps=50 , strength=0.6 , ) __magic_name__ = output.images[0] assert image.shape == (512, 512, 3) __magic_name__ = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy""" ) assert np.abs(expected_image - image ).max() < 9E-2
706
import math def a__ ( A_, A_ = 0, A_ = 0 ): '''simple docstring''' __magic_name__ = end or len(A_ ) for i in range(A_, A_ ): __magic_name__ = i __magic_name__ = array[i] while temp_index != start and temp_index_value < array[temp_index - 1]: __magic_name__ = array[temp_index - 1] temp_index -= 1 __magic_name__ = temp_index_value return array def a__ ( A_, A_, A_ ): # Max Heap '''simple docstring''' __magic_name__ = index __magic_name__ = 2 * index + 1 # Left Node __magic_name__ = 2 * index + 2 # Right Node if left_index < heap_size and array[largest] < array[left_index]: __magic_name__ = left_index if right_index < heap_size and array[largest] < array[right_index]: __magic_name__ = right_index if largest != index: __magic_name__ , __magic_name__ = array[largest], array[index] heapify(A_, A_, A_ ) def a__ ( A_ ): '''simple docstring''' __magic_name__ = len(A_ ) for i in range(n // 2, -1, -1 ): heapify(A_, A_, A_ ) for i in range(n - 1, 0, -1 ): __magic_name__ , __magic_name__ = array[0], array[i] heapify(A_, 0, A_ ) return array def a__ ( A_, A_, A_, A_ ): '''simple docstring''' if (array[first_index] > array[middle_index]) != ( array[first_index] > array[last_index] ): return array[first_index] elif (array[middle_index] > array[first_index]) != ( array[middle_index] > array[last_index] ): return array[middle_index] else: return array[last_index] def a__ ( A_, A_, A_, A_ ): '''simple docstring''' __magic_name__ = low __magic_name__ = high while True: while array[i] < pivot: i += 1 j -= 1 while pivot < array[j]: j -= 1 if i >= j: return i __magic_name__ , __magic_name__ = array[j], array[i] i += 1 def a__ ( A_ ): '''simple docstring''' if len(A_ ) == 0: return array __magic_name__ = 2 * math.ceil(math.loga(len(A_ ) ) ) __magic_name__ = 16 return intro_sort(A_, 0, len(A_ ), A_, A_ ) def a__ ( A_, A_, A_, A_, A_ ): '''simple docstring''' while end - start > size_threshold: if max_depth == 0: return heap_sort(A_ ) max_depth -= 1 __magic_name__ = median_of_a(A_, A_, start + ((end - start) // 2) + 1, end - 1 ) __magic_name__ = partition(A_, A_, A_, A_ ) intro_sort(A_, A_, A_, A_, A_ ) __magic_name__ = p return insertion_sort(A_, A_, A_ ) if __name__ == "__main__": import doctest doctest.testmod() __lowerCAmelCase : str = input('Enter numbers separated by a comma : ').strip() __lowerCAmelCase : List[Any] = [float(item) for item in user_input.split(',')] print(sort(unsorted))
76
0
import importlib.metadata from typing import Union from packaging.version import Version, parse from .constants import STR_OPERATION_TO_FUNC __lowerCAmelCase : Optional[Any] = parse(importlib.metadata.version('torch')) def a__ ( A_, A_, A_ ): '''simple docstring''' if operation not in STR_OPERATION_TO_FUNC.keys(): raise ValueError(f'''`operation` must be one of {list(STR_OPERATION_TO_FUNC.keys() )}, received {operation}''' ) __magic_name__ = STR_OPERATION_TO_FUNC[operation] if isinstance(lowercase_, lowercase_ ): __magic_name__ = parse(importlib.metadata.version(lowercase_ ) ) return operation(lowercase_, parse(lowercase_ ) ) def a__ ( A_, A_ ): '''simple docstring''' return compare_versions(lowercase_, lowercase_, lowercase_ )
707
import argparse import json import re from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( MobileNetVaConfig, MobileNetVaForImageClassification, MobileNetVaImageProcessor, load_tf_weights_in_mobilenet_va, ) from transformers.utils import logging logging.set_verbosity_info() __lowerCAmelCase : str = logging.get_logger(__name__) def a__ ( A_ ): '''simple docstring''' __magic_name__ = MobileNetVaConfig(layer_norm_eps=0.001 ) if "_quant" in model_name: raise ValueError("""Quantized models are not supported.""" ) __magic_name__ = re.match(R"""^mobilenet_v1_([^_]*)_([^_]*)$""", A_ ) if matches: __magic_name__ = float(matches[1] ) __magic_name__ = int(matches[2] ) # The TensorFlow version of MobileNetV1 predicts 1001 classes instead of # the usual 1000. The first class (index 0) is "background". __magic_name__ = 1001 __magic_name__ = """imagenet-1k-id2label.json""" __magic_name__ = """huggingface/label-files""" __magic_name__ = json.load(open(hf_hub_download(A_, A_, repo_type="""dataset""" ), """r""" ) ) __magic_name__ = {int(A_ ) + 1: v for k, v in idalabel.items()} __magic_name__ = """background""" __magic_name__ = idalabel __magic_name__ = {v: k for k, v in idalabel.items()} return config def a__ ( ): '''simple docstring''' __magic_name__ = """http://images.cocodataset.org/val2017/000000039769.jpg""" __magic_name__ = Image.open(requests.get(A_, stream=A_ ).raw ) return im @torch.no_grad() def a__ ( A_, A_, A_, A_=False ): '''simple docstring''' __magic_name__ = get_mobilenet_va_config(A_ ) # Load 🤗 model __magic_name__ = MobileNetVaForImageClassification(A_ ).eval() # Load weights from TensorFlow checkpoint load_tf_weights_in_mobilenet_va(A_, A_, A_ ) # Check outputs on an image, prepared by MobileNetV1ImageProcessor __magic_name__ = MobileNetVaImageProcessor( crop_size={"""width""": config.image_size, """height""": config.image_size}, size={"""shortest_edge""": config.image_size + 32}, ) __magic_name__ = image_processor(images=prepare_img(), return_tensors="""pt""" ) __magic_name__ = model(**A_ ) __magic_name__ = outputs.logits assert logits.shape == (1, 1001) if model_name == "mobilenet_v1_1.0_224": __magic_name__ = torch.tensor([-4.1739, -1.1233, 3.1205] ) elif model_name == "mobilenet_v1_0.75_192": __magic_name__ = torch.tensor([-3.9440, -2.3141, -0.3333] ) else: __magic_name__ = None if expected_logits is not None: assert torch.allclose(logits[0, :3], A_, atol=1e-4 ) Path(A_ ).mkdir(exist_ok=A_ ) print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(A_ ) print(f'''Saving image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(A_ ) if push_to_hub: print("""Pushing to the hub...""" ) __magic_name__ = """google/""" + model_name image_processor.push_to_hub(A_ ) model.push_to_hub(A_ ) if __name__ == "__main__": __lowerCAmelCase : str = argparse.ArgumentParser() # Required parameters parser.add_argument( '--model_name', default='mobilenet_v1_1.0_224', type=str, help='Name of the MobileNetV1 model you\'d like to convert. Should in the form \'mobilenet_v1_<depth>_<size>\'.', ) parser.add_argument( '--checkpoint_path', required=True, type=str, help='Path to the original TensorFlow checkpoint (.ckpt file).' ) parser.add_argument( '--pytorch_dump_folder_path', required=True, type=str, help='Path to the output PyTorch model directory.' ) parser.add_argument( '--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.' ) __lowerCAmelCase : str = parser.parse_args() convert_movilevit_checkpoint( args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub )
76
0
def a__ ( A_ ): '''simple docstring''' __magic_name__ = [[0 for _ in range(__UpperCamelCase )] for _ in range(m + 1 )] for i in range(m + 1 ): __magic_name__ = 1 for n in range(m + 1 ): for k in range(1, __UpperCamelCase ): memo[n][k] += memo[n][k - 1] if n - k > 0: memo[n][k] += memo[n - k - 1][k] return memo[m][m - 1] if __name__ == "__main__": import sys if len(sys.argv) == 1: try: __lowerCAmelCase : str = int(input('Enter a number: ').strip()) print(partition(n)) except ValueError: print('Please enter a number.') else: try: __lowerCAmelCase : Dict = int(sys.argv[1]) print(partition(n)) except ValueError: print('Please pass a number.')
708
import collections import importlib.util import os import re from pathlib import Path __lowerCAmelCase : int = 'src/transformers' # Matches is_xxx_available() __lowerCAmelCase : Optional[int] = re.compile(R'is\_([a-z_]*)_available()') # Catches a one-line _import_struct = {xxx} __lowerCAmelCase : Dict = re.compile(R'^_import_structure\s+=\s+\{([^\}]+)\}') # Catches a line with a key-values pattern: "bla": ["foo", "bar"] __lowerCAmelCase : int = re.compile(R'\s+"\S*":\s+\[([^\]]*)\]') # Catches a line if not is_foo_available __lowerCAmelCase : Optional[Any] = re.compile(R'^\s*if\s+not\s+is\_[a-z_]*\_available\(\)') # Catches a line _import_struct["bla"].append("foo") __lowerCAmelCase : Optional[Any] = re.compile(R'^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)') # Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"] __lowerCAmelCase : Dict = re.compile(R'^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]') # Catches a line with an object between quotes and a comma: "MyModel", __lowerCAmelCase : List[str] = re.compile('^\s+"([^"]+)",') # Catches a line with objects between brackets only: ["foo", "bar"], __lowerCAmelCase : Optional[int] = re.compile('^\s+\[([^\]]+)\]') # Catches a line with from foo import bar, bla, boo __lowerCAmelCase : List[str] = re.compile(R'\s+from\s+\S*\s+import\s+([^\(\s].*)\n') # Catches a line with try: __lowerCAmelCase : int = re.compile(R'^\s*try:') # Catches a line with else: __lowerCAmelCase : Tuple = re.compile(R'^\s*else:') def a__ ( A_ ): '''simple docstring''' if _re_test_backend.search(A_ ) is None: return None __magic_name__ = [b[0] for b in _re_backend.findall(A_ )] backends.sort() return "_and_".join(A_ ) def a__ ( A_ ): '''simple docstring''' with open(A_, """r""", encoding="""utf-8""", newline="""\n""" ) as f: __magic_name__ = f.readlines() __magic_name__ = 0 while line_index < len(A_ ) and not lines[line_index].startswith("""_import_structure = {""" ): line_index += 1 # If this is a traditional init, just return. if line_index >= len(A_ ): return None # First grab the objects without a specific backend in _import_structure __magic_name__ = [] while not lines[line_index].startswith("""if TYPE_CHECKING""" ) and find_backend(lines[line_index] ) is None: __magic_name__ = lines[line_index] # If we have everything on a single line, let's deal with it. if _re_one_line_import_struct.search(A_ ): __magic_name__ = _re_one_line_import_struct.search(A_ ).groups()[0] __magic_name__ = re.findall("""\[([^\]]+)\]""", A_ ) for imp in imports: objects.extend([obj[1:-1] for obj in imp.split(""", """ )] ) line_index += 1 continue __magic_name__ = _re_import_struct_key_value.search(A_ ) if single_line_import_search is not None: __magic_name__ = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(""", """ ) if len(A_ ) > 0] objects.extend(A_ ) elif line.startswith(""" """ * 8 + """\"""" ): objects.append(line[9:-3] ) line_index += 1 __magic_name__ = {"""none""": objects} # Let's continue with backend-specific objects in _import_structure while not lines[line_index].startswith("""if TYPE_CHECKING""" ): # If the line is an if not is_backend_available, we grab all objects associated. __magic_name__ = find_backend(lines[line_index] ) # Check if the backend declaration is inside a try block: if _re_try.search(lines[line_index - 1] ) is None: __magic_name__ = None if backend is not None: line_index += 1 # Scroll until we hit the else block of try-except-else while _re_else.search(lines[line_index] ) is None: line_index += 1 line_index += 1 __magic_name__ = [] # Until we unindent, add backend objects to the list while len(lines[line_index] ) <= 1 or lines[line_index].startswith(""" """ * 4 ): __magic_name__ = lines[line_index] if _re_import_struct_add_one.search(A_ ) is not None: objects.append(_re_import_struct_add_one.search(A_ ).groups()[0] ) elif _re_import_struct_add_many.search(A_ ) is not None: __magic_name__ = _re_import_struct_add_many.search(A_ ).groups()[0].split(""", """ ) __magic_name__ = [obj[1:-1] for obj in imports if len(A_ ) > 0] objects.extend(A_ ) elif _re_between_brackets.search(A_ ) is not None: __magic_name__ = _re_between_brackets.search(A_ ).groups()[0].split(""", """ ) __magic_name__ = [obj[1:-1] for obj in imports if len(A_ ) > 0] objects.extend(A_ ) elif _re_quote_object.search(A_ ) is not None: objects.append(_re_quote_object.search(A_ ).groups()[0] ) elif line.startswith(""" """ * 8 + """\"""" ): objects.append(line[9:-3] ) elif line.startswith(""" """ * 12 + """\"""" ): objects.append(line[13:-3] ) line_index += 1 __magic_name__ = objects else: line_index += 1 # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend __magic_name__ = [] while ( line_index < len(A_ ) and find_backend(lines[line_index] ) is None and not lines[line_index].startswith("""else""" ) ): __magic_name__ = lines[line_index] __magic_name__ = _re_import.search(A_ ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(""", """ ) ) elif line.startswith(""" """ * 8 ): objects.append(line[8:-2] ) line_index += 1 __magic_name__ = {"""none""": objects} # Let's continue with backend-specific objects while line_index < len(A_ ): # If the line is an if is_backend_available, we grab all objects associated. __magic_name__ = find_backend(lines[line_index] ) # Check if the backend declaration is inside a try block: if _re_try.search(lines[line_index - 1] ) is None: __magic_name__ = None if backend is not None: line_index += 1 # Scroll until we hit the else block of try-except-else while _re_else.search(lines[line_index] ) is None: line_index += 1 line_index += 1 __magic_name__ = [] # Until we unindent, add backend objects to the list while len(lines[line_index] ) <= 1 or lines[line_index].startswith(""" """ * 8 ): __magic_name__ = lines[line_index] __magic_name__ = _re_import.search(A_ ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(""", """ ) ) elif line.startswith(""" """ * 12 ): objects.append(line[12:-2] ) line_index += 1 __magic_name__ = objects else: line_index += 1 return import_dict_objects, type_hint_objects def a__ ( A_, A_ ): '''simple docstring''' def find_duplicates(A_ ): return [k for k, v in collections.Counter(A_ ).items() if v > 1] if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ): return ["Both sides of the init do not have the same backends!"] __magic_name__ = [] for key in import_dict_objects.keys(): __magic_name__ = find_duplicates(import_dict_objects[key] ) if duplicate_imports: errors.append(f'''Duplicate _import_structure definitions for: {duplicate_imports}''' ) __magic_name__ = find_duplicates(type_hint_objects[key] ) if duplicate_type_hints: errors.append(f'''Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}''' ) if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ): __magic_name__ = """base imports""" if key == """none""" else f'''{key} backend''' errors.append(f'''Differences for {name}:''' ) for a in type_hint_objects[key]: if a not in import_dict_objects[key]: errors.append(f''' {a} in TYPE_HINT but not in _import_structure.''' ) for a in import_dict_objects[key]: if a not in type_hint_objects[key]: errors.append(f''' {a} in _import_structure but not in TYPE_HINT.''' ) return errors def a__ ( ): '''simple docstring''' __magic_name__ = [] for root, _, files in os.walk(A_ ): if "__init__.py" in files: __magic_name__ = os.path.join(A_, """__init__.py""" ) __magic_name__ = parse_init(A_ ) if objects is not None: __magic_name__ = analyze_results(*A_ ) if len(A_ ) > 0: __magic_name__ = f'''Problem in {fname}, both halves do not define the same objects.\n{errors[0]}''' failures.append("""\n""".join(A_ ) ) if len(A_ ) > 0: raise ValueError("""\n\n""".join(A_ ) ) def a__ ( ): '''simple docstring''' __magic_name__ = [] for path, directories, files in os.walk(A_ ): for folder in directories: # Ignore private modules if folder.startswith("""_""" ): directories.remove(A_ ) continue # Ignore leftovers from branches (empty folders apart from pycache) if len(list((Path(A_ ) / folder).glob("""*.py""" ) ) ) == 0: continue __magic_name__ = str((Path(A_ ) / folder).relative_to(A_ ) ) __magic_name__ = short_path.replace(os.path.sep, """.""" ) submodules.append(A_ ) for fname in files: if fname == "__init__.py": continue __magic_name__ = str((Path(A_ ) / fname).relative_to(A_ ) ) __magic_name__ = short_path.replace(""".py""", """""" ).replace(os.path.sep, """.""" ) if len(submodule.split(""".""" ) ) == 1: submodules.append(A_ ) return submodules __lowerCAmelCase : Dict = [ 'convert_pytorch_checkpoint_to_tf2', 'modeling_flax_pytorch_utils', ] def a__ ( ): '''simple docstring''' __magic_name__ = importlib.util.spec_from_file_location( """transformers""", os.path.join(A_, """__init__.py""" ), submodule_search_locations=[PATH_TO_TRANSFORMERS], ) __magic_name__ = spec.loader.load_module() __magic_name__ = [ module for module in get_transformers_submodules() if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys() ] if len(A_ ) > 0: __magic_name__ = """\n""".join(f'''- {module}''' for module in module_not_registered ) raise ValueError( """The following submodules are not properly registered in the main init of Transformers:\n""" f'''{list_of_modules}\n''' """Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.""" ) if __name__ == "__main__": check_all_inits() check_submodules()
76
0
import os import time import numpy as np import onnxruntime as ort __lowerCAmelCase : List[str] = """1""" __lowerCAmelCase : Any = """0""" __lowerCAmelCase : Dict = """1""" __lowerCAmelCase : int = ort.SessionOptions() __lowerCAmelCase : Optional[int] = ort.GraphOptimizationLevel.ORT_DISABLE_ALL print('Create inference session...') __lowerCAmelCase : Dict = ["""TensorrtExecutionProvider""", """CUDAExecutionProvider"""] __lowerCAmelCase : str = ort.InferenceSession('model.onnx', sess_options=sess_opt, providers=execution_provider) __lowerCAmelCase : str = ort.RunOptions() __lowerCAmelCase : List[str] = 128 __lowerCAmelCase : Any = 1 __lowerCAmelCase : int = np.ones((batch, sequence), dtype=np.intaa) __lowerCAmelCase : Any = np.ones((batch, sequence), dtype=np.intaa) __lowerCAmelCase : Tuple = np.ones((batch, sequence), dtype=np.intaa) print('Warm up phase...') sess.run( None, { sess.get_inputs()[0].name: input_ids, sess.get_inputs()[1].name: attention_mask, sess.get_inputs()[2].name: token_type_ids, }, run_options=run_opt, ) print('Start inference...') __lowerCAmelCase : Dict = time.time() __lowerCAmelCase : Optional[Any] = 2000 __lowerCAmelCase : List[Any] = {} for iter in range(max_iters): __lowerCAmelCase : Any = sess.run( None, { sess.get_inputs()[0].name: input_ids, sess.get_inputs()[1].name: attention_mask, sess.get_inputs()[2].name: token_type_ids, }, run_options=run_opt, ) print('Average Inference Time = {:.3f} ms'.format((time.time() - start_time) * 1000 / max_iters))
709
import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging __lowerCAmelCase : Optional[Any] = logging.get_logger(__name__) __lowerCAmelCase : List[Any] = { 'asapp/sew-d-tiny-100k': 'https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json', # See all SEW-D models at https://huggingface.co/models?filter=sew-d } class UpperCAmelCase_ ( _A ): '''simple docstring''' a__ = """sew-d""" def __init__( self : List[str] , UpperCamelCase__ : Tuple=32 , UpperCamelCase__ : Optional[int]=768 , UpperCamelCase__ : Tuple=12 , UpperCamelCase__ : Optional[Any]=12 , UpperCamelCase__ : int=3072 , UpperCamelCase__ : Tuple=2 , UpperCamelCase__ : List[Any]=512 , UpperCamelCase__ : Any=256 , UpperCamelCase__ : Optional[int]=True , UpperCamelCase__ : Dict=True , UpperCamelCase__ : str=("p2c", "c2p") , UpperCamelCase__ : List[Any]="layer_norm" , UpperCamelCase__ : int="gelu_python" , UpperCamelCase__ : Optional[int]=0.1 , UpperCamelCase__ : Tuple=0.1 , UpperCamelCase__ : List[Any]=0.1 , UpperCamelCase__ : int=0.0 , UpperCamelCase__ : Dict=0.1 , UpperCamelCase__ : List[Any]=0.02 , UpperCamelCase__ : Optional[int]=1E-7 , UpperCamelCase__ : List[Any]=1E-5 , UpperCamelCase__ : List[str]="group" , UpperCamelCase__ : Optional[int]="gelu" , UpperCamelCase__ : Tuple=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512) , UpperCamelCase__ : str=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , UpperCamelCase__ : Optional[Any]=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , UpperCamelCase__ : Optional[Any]=False , UpperCamelCase__ : Optional[int]=128 , UpperCamelCase__ : Tuple=16 , UpperCamelCase__ : Optional[int]=True , UpperCamelCase__ : Dict=0.05 , UpperCamelCase__ : str=10 , UpperCamelCase__ : Tuple=2 , UpperCamelCase__ : Dict=0.0 , UpperCamelCase__ : Dict=10 , UpperCamelCase__ : Union[str, Any]=0 , UpperCamelCase__ : List[Any]="mean" , UpperCamelCase__ : int=False , UpperCamelCase__ : Optional[int]=False , UpperCamelCase__ : Optional[int]=256 , UpperCamelCase__ : List[str]=0 , UpperCamelCase__ : Union[str, Any]=1 , UpperCamelCase__ : List[Any]=2 , **UpperCamelCase__ : str , ) -> Dict: """simple docstring""" super().__init__(**UpperCamelCase__ , pad_token_id=UpperCamelCase__ , bos_token_id=UpperCamelCase__ , eos_token_id=UpperCamelCase__ ) __magic_name__ = hidden_size __magic_name__ = feat_extract_norm __magic_name__ = feat_extract_activation __magic_name__ = list(UpperCamelCase__ ) __magic_name__ = list(UpperCamelCase__ ) __magic_name__ = list(UpperCamelCase__ ) __magic_name__ = conv_bias __magic_name__ = num_conv_pos_embeddings __magic_name__ = num_conv_pos_embedding_groups __magic_name__ = len(self.conv_dim ) __magic_name__ = num_hidden_layers __magic_name__ = intermediate_size __magic_name__ = squeeze_factor __magic_name__ = max_position_embeddings __magic_name__ = position_buckets __magic_name__ = share_att_key __magic_name__ = relative_attention __magic_name__ = norm_rel_ebd __magic_name__ = list(UpperCamelCase__ ) __magic_name__ = hidden_act __magic_name__ = num_attention_heads __magic_name__ = hidden_dropout __magic_name__ = attention_dropout __magic_name__ = activation_dropout __magic_name__ = feat_proj_dropout __magic_name__ = final_dropout __magic_name__ = layer_norm_eps __magic_name__ = feature_layer_norm_eps __magic_name__ = initializer_range __magic_name__ = vocab_size if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( """Configuration for convolutional layers is incorrect.""" """It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,""" F'''but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)''' F'''= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' ) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 __magic_name__ = apply_spec_augment __magic_name__ = mask_time_prob __magic_name__ = mask_time_length __magic_name__ = mask_time_min_masks __magic_name__ = mask_feature_prob __magic_name__ = mask_feature_length __magic_name__ = mask_feature_min_masks # ctc loss __magic_name__ = ctc_loss_reduction __magic_name__ = ctc_zero_infinity # sequence classification __magic_name__ = use_weighted_layer_sum __magic_name__ = classifier_proj_size @property def _lowercase ( self : Union[str, Any] ) -> str: """simple docstring""" return functools.reduce(operator.mul , self.conv_stride , 1 )
76
0
# Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch from ..models.speechta import SpeechTaForTextToSpeech, SpeechTaHifiGan, SpeechTaProcessor from ..utils import is_datasets_available from .base import PipelineTool if is_datasets_available(): from datasets import load_dataset class UpperCAmelCase_ ( UpperCamelCase__ ): '''simple docstring''' a__ = """microsoft/speecht5_tts""" a__ = ( """This is a tool that reads an English text out loud. It takes an input named `text` which should contain the """ """text to read (in English) and returns a waveform object containing the sound.""" ) a__ = """text_reader""" a__ = SpeechTaProcessor a__ = SpeechTaForTextToSpeech a__ = SpeechTaHifiGan a__ = ["""text"""] a__ = ["""audio"""] def _lowercase ( self : Union[str, Any] ) -> str: """simple docstring""" if self.post_processor is None: __magic_name__ = """microsoft/speecht5_hifigan""" super().setup() def _lowercase ( self : Optional[Any] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Optional[int]=None ) -> Optional[Any]: """simple docstring""" __magic_name__ = self.pre_processor(text=_a , return_tensors="""pt""" , truncation=_a ) if speaker_embeddings is None: if not is_datasets_available(): raise ImportError("""Datasets needs to be installed if not passing speaker embeddings.""" ) __magic_name__ = load_dataset("""Matthijs/cmu-arctic-xvectors""" , split="""validation""" ) __magic_name__ = torch.tensor(embeddings_dataset[7305]["""xvector"""] ).unsqueeze(0 ) return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings} def _lowercase ( self : Union[str, Any] , UpperCamelCase__ : List[str] ) -> List[Any]: """simple docstring""" with torch.no_grad(): return self.model.generate_speech(**_a ) def _lowercase ( self : Optional[int] , UpperCamelCase__ : Tuple ) -> List[str]: """simple docstring""" with torch.no_grad(): return self.post_processor(_a ).cpu().detach()
710
import math import random def a__ ( A_, A_ = False ): '''simple docstring''' if deriv: return value * (1 - value) return 1 / (1 + math.exp(-value )) # Initial Value __lowerCAmelCase : Union[str, Any] = 0.02 def a__ ( A_, A_ ): '''simple docstring''' __magic_name__ = float(2 * (random.randint(1, 100 )) - 1 ) for _ in range(A_ ): # Forward propagation __magic_name__ = sigmoid_function(INITIAL_VALUE * weight ) # How much did we miss? __magic_name__ = (expected / 100) - layer_a # Error delta __magic_name__ = layer_1_error * sigmoid_function(A_, A_ ) # Update weight weight += INITIAL_VALUE * layer_1_delta return layer_a * 100 if __name__ == "__main__": import doctest doctest.testmod() __lowerCAmelCase : List[Any] = int(input('Expected value: ')) __lowerCAmelCase : Tuple = int(input('Number of propagations: ')) print(forward_propagation(expected, number_propagations))
76
0
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging __lowerCAmelCase : Dict = logging.get_logger(__name__) __lowerCAmelCase : Dict = { 'kssteven/ibert-roberta-base': 'https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json', 'kssteven/ibert-roberta-large': 'https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json', 'kssteven/ibert-roberta-large-mnli': ( 'https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json' ), } class UpperCAmelCase_ ( SCREAMING_SNAKE_CASE_ ): '''simple docstring''' a__ = """ibert""" def __init__( self : Tuple , UpperCamelCase__ : str=3_0522 , UpperCamelCase__ : int=768 , UpperCamelCase__ : Optional[Any]=12 , UpperCamelCase__ : List[str]=12 , UpperCamelCase__ : Optional[Any]=3072 , UpperCamelCase__ : str="gelu" , UpperCamelCase__ : List[Any]=0.1 , UpperCamelCase__ : Union[str, Any]=0.1 , UpperCamelCase__ : str=512 , UpperCamelCase__ : Optional[int]=2 , UpperCamelCase__ : int=0.02 , UpperCamelCase__ : Optional[int]=1E-12 , UpperCamelCase__ : Optional[Any]=1 , UpperCamelCase__ : Optional[Any]=0 , UpperCamelCase__ : str=2 , UpperCamelCase__ : Tuple="absolute" , UpperCamelCase__ : Optional[Any]=False , UpperCamelCase__ : Optional[int]="none" , **UpperCamelCase__ : Optional[Any] , ) -> Union[str, Any]: """simple docstring""" super().__init__(pad_token_id=UpperCamelCase__ , bos_token_id=UpperCamelCase__ , eos_token_id=UpperCamelCase__ , **UpperCamelCase__ ) __magic_name__ = vocab_size __magic_name__ = hidden_size __magic_name__ = num_hidden_layers __magic_name__ = num_attention_heads __magic_name__ = hidden_act __magic_name__ = intermediate_size __magic_name__ = hidden_dropout_prob __magic_name__ = attention_probs_dropout_prob __magic_name__ = max_position_embeddings __magic_name__ = type_vocab_size __magic_name__ = initializer_range __magic_name__ = layer_norm_eps __magic_name__ = position_embedding_type __magic_name__ = quant_mode __magic_name__ = force_dequant class UpperCAmelCase_ ( SCREAMING_SNAKE_CASE_ ): '''simple docstring''' @property def _lowercase ( self : Tuple ) -> Dict: """simple docstring""" if self.task == "multiple-choice": __magic_name__ = {0: '''batch''', 1: '''choice''', 2: '''sequence'''} else: __magic_name__ = {0: '''batch''', 1: '''sequence'''} return OrderedDict( [ ("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis), ] )
711
import os import sys __lowerCAmelCase : Optional[Any] = os.path.join(os.path.dirname(__file__), 'src') sys.path.append(SRC_DIR) from transformers import ( AutoConfig, AutoModel, AutoModelForCausalLM, AutoModelForMaskedLM, AutoModelForQuestionAnswering, AutoModelForSequenceClassification, AutoTokenizer, add_start_docstrings, ) __lowerCAmelCase : Union[str, Any] = [ 'torch', 'numpy', 'tokenizers', 'filelock', 'requests', 'tqdm', 'regex', 'sentencepiece', 'sacremoses', 'importlib_metadata', 'huggingface_hub', ] @add_start_docstrings(AutoConfig.__doc__ ) def a__ ( *A_, **A_ ): '''simple docstring''' return AutoConfig.from_pretrained(*A_, **A_ ) @add_start_docstrings(AutoTokenizer.__doc__ ) def a__ ( *A_, **A_ ): '''simple docstring''' return AutoTokenizer.from_pretrained(*A_, **A_ ) @add_start_docstrings(AutoModel.__doc__ ) def a__ ( *A_, **A_ ): '''simple docstring''' return AutoModel.from_pretrained(*A_, **A_ ) @add_start_docstrings(AutoModelForCausalLM.__doc__ ) def a__ ( *A_, **A_ ): '''simple docstring''' return AutoModelForCausalLM.from_pretrained(*A_, **A_ ) @add_start_docstrings(AutoModelForMaskedLM.__doc__ ) def a__ ( *A_, **A_ ): '''simple docstring''' return AutoModelForMaskedLM.from_pretrained(*A_, **A_ ) @add_start_docstrings(AutoModelForSequenceClassification.__doc__ ) def a__ ( *A_, **A_ ): '''simple docstring''' return AutoModelForSequenceClassification.from_pretrained(*A_, **A_ ) @add_start_docstrings(AutoModelForQuestionAnswering.__doc__ ) def a__ ( *A_, **A_ ): '''simple docstring''' return AutoModelForQuestionAnswering.from_pretrained(*A_, **A_ )
76
0
'''simple docstring''' from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, ChannelDimension, ImageInput, PILImageResampling, is_batched, to_numpy_array, valid_images, ) from ...utils import TensorType, logging __lowerCAmelCase : Union[str, Any] = logging.get_logger(__name__) class UpperCAmelCase_ ( lowerCAmelCase__ ): '''simple docstring''' a__ = ["pixel_values"] def __init__( self : Union[str, Any] , UpperCamelCase__ : Any = True , UpperCamelCase__ : Dict = None , UpperCamelCase__ : List[Any] = PILImageResampling.BICUBIC , UpperCamelCase__ : Optional[Any] = True , UpperCamelCase__ : Optional[int] = True , UpperCamelCase__ : Optional[int] = 1 / 255 , UpperCamelCase__ : Optional[Any] = None , UpperCamelCase__ : int = True , UpperCamelCase__ : str = None , UpperCamelCase__ : List[str] = None , **UpperCamelCase__ : Any , ) -> None: """simple docstring""" super().__init__(**_lowerCamelCase ) __magic_name__ = size if size is not None else {"""height""": 224, """width""": 224} __magic_name__ = get_size_dict(_lowerCamelCase ) __magic_name__ = crop_size if crop_size is not None else {"""height""": 224, """width""": 224} __magic_name__ = get_size_dict(_lowerCamelCase , default_to_square=_lowerCamelCase , param_name="""crop_size""" ) __magic_name__ = do_resize __magic_name__ = do_rescale __magic_name__ = do_normalize __magic_name__ = do_center_crop __magic_name__ = crop_size __magic_name__ = size __magic_name__ = resample __magic_name__ = rescale_factor __magic_name__ = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN __magic_name__ = image_std if image_std is not None else IMAGENET_DEFAULT_STD def _lowercase ( self : Dict , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Optional[Any] = PILImageResampling.BILINEAR , UpperCamelCase__ : Tuple = None , **UpperCamelCase__ : Dict , ) -> np.ndarray: """simple docstring""" __magic_name__ = get_size_dict(_lowerCamelCase ) if "shortest_edge" in size: __magic_name__ = get_resize_output_image_size(_lowerCamelCase , size=size["""shortest_edge"""] , default_to_square=_lowerCamelCase ) # size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"]) elif "height" in size and "width" in size: __magic_name__ = (size["""height"""], size["""width"""]) else: raise ValueError(F'''Size must contain \'height\' and \'width\' keys or \'shortest_edge\' key. Got {size.keys()}''' ) return resize(_lowerCamelCase , size=_lowerCamelCase , resample=_lowerCamelCase , data_format=_lowerCamelCase , **_lowerCamelCase ) def _lowercase ( self : Tuple , UpperCamelCase__ : List[str] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Any = None , **UpperCamelCase__ : Optional[Any] , ) -> np.ndarray: """simple docstring""" __magic_name__ = get_size_dict(_lowerCamelCase ) if "height" not in size or "width" not in size: raise ValueError(F'''The `size` parameter must contain the keys (height, width). Got {size.keys()}''' ) return center_crop(_lowerCamelCase , size=(size["""height"""], size["""width"""]) , data_format=_lowerCamelCase , **_lowerCamelCase ) def _lowercase ( self : str , UpperCamelCase__ : int , UpperCamelCase__ : Dict , UpperCamelCase__ : str = None , **UpperCamelCase__ : Tuple ) -> np.ndarray: """simple docstring""" return rescale(_lowerCamelCase , scale=_lowerCamelCase , data_format=_lowerCamelCase , **_lowerCamelCase ) def _lowercase ( self : Tuple , UpperCamelCase__ : str , UpperCamelCase__ : str , UpperCamelCase__ : Dict , UpperCamelCase__ : Any = None , **UpperCamelCase__ : Any , ) -> np.ndarray: """simple docstring""" return normalize(_lowerCamelCase , mean=_lowerCamelCase , std=_lowerCamelCase , data_format=_lowerCamelCase , **_lowerCamelCase ) def _lowercase ( self : List[Any] , UpperCamelCase__ : int , UpperCamelCase__ : List[str] = None , UpperCamelCase__ : List[Any] = None , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : List[str] = None , UpperCamelCase__ : Any = None , UpperCamelCase__ : Optional[Any] = None , UpperCamelCase__ : Union[str, Any] = None , UpperCamelCase__ : Union[str, Any] = None , UpperCamelCase__ : str = None , UpperCamelCase__ : Union[str, Any] = None , UpperCamelCase__ : Tuple = None , UpperCamelCase__ : Optional[int] = ChannelDimension.FIRST , **UpperCamelCase__ : int , ) -> BatchFeature: """simple docstring""" __magic_name__ = do_resize if do_resize is not None else self.do_resize __magic_name__ = do_rescale if do_rescale is not None else self.do_rescale __magic_name__ = do_normalize if do_normalize is not None else self.do_normalize __magic_name__ = do_center_crop if do_center_crop is not None else self.do_center_crop __magic_name__ = crop_size if crop_size is not None else self.crop_size __magic_name__ = get_size_dict(_lowerCamelCase , param_name="""crop_size""" , default_to_square=_lowerCamelCase ) __magic_name__ = resample if resample is not None else self.resample __magic_name__ = rescale_factor if rescale_factor is not None else self.rescale_factor __magic_name__ = image_mean if image_mean is not None else self.image_mean __magic_name__ = image_std if image_std is not None else self.image_std __magic_name__ = size if size is not None else self.size __magic_name__ = get_size_dict(_lowerCamelCase ) if not is_batched(_lowerCamelCase ): __magic_name__ = [images] if not valid_images(_lowerCamelCase ): raise ValueError( """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """ """torch.Tensor, tf.Tensor or jax.ndarray.""" ) if do_resize and size is None: raise ValueError("""Size must be specified if do_resize is True.""" ) if do_center_crop and crop_size is None: raise ValueError("""Crop size must be specified if do_center_crop is True.""" ) if do_rescale and rescale_factor is None: raise ValueError("""Rescale factor must be specified if do_rescale is True.""" ) # All transformations expect numpy arrays. __magic_name__ = [to_numpy_array(_lowerCamelCase ) for image in images] if do_resize: __magic_name__ = [self.resize(image=_lowerCamelCase , size=_lowerCamelCase , resample=_lowerCamelCase ) for image in images] if do_center_crop: __magic_name__ = [self.center_crop(image=_lowerCamelCase , size=_lowerCamelCase ) for image in images] if do_rescale: __magic_name__ = [self.rescale(image=_lowerCamelCase , scale=_lowerCamelCase ) for image in images] if do_normalize: __magic_name__ = [self.normalize(image=_lowerCamelCase , mean=_lowerCamelCase , std=_lowerCamelCase ) for image in images] __magic_name__ = [to_channel_dimension_format(_lowerCamelCase , _lowerCamelCase ) for image in images] __magic_name__ = {"""pixel_values""": images} return BatchFeature(data=_lowerCamelCase , tensor_type=_lowerCamelCase )
712
from typing import Dict from .base import GenericTensor, Pipeline class UpperCAmelCase_ ( _A ): '''simple docstring''' def _lowercase ( self : List[Any] , UpperCamelCase__ : List[Any]=None , UpperCamelCase__ : Union[str, Any]=None , UpperCamelCase__ : Any=None , **UpperCamelCase__ : Dict ) -> str: """simple docstring""" if tokenize_kwargs is None: __magic_name__ = {} if truncation is not None: if "truncation" in tokenize_kwargs: raise ValueError( """truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)""" ) __magic_name__ = truncation __magic_name__ = tokenize_kwargs __magic_name__ = {} if return_tensors is not None: __magic_name__ = return_tensors return preprocess_params, {}, postprocess_params def _lowercase ( self : int , UpperCamelCase__ : int , **UpperCamelCase__ : int ) -> Dict[str, GenericTensor]: """simple docstring""" __magic_name__ = self.framework __magic_name__ = self.tokenizer(UpperCamelCase__ , return_tensors=UpperCamelCase__ , **UpperCamelCase__ ) return model_inputs def _lowercase ( self : str , UpperCamelCase__ : Dict ) -> str: """simple docstring""" __magic_name__ = self.model(**UpperCamelCase__ ) return model_outputs def _lowercase ( self : Union[str, Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : List[str]=False ) -> List[str]: """simple docstring""" if return_tensors: return model_outputs[0] if self.framework == "pt": return model_outputs[0].tolist() elif self.framework == "tf": return model_outputs[0].numpy().tolist() def __call__( self : List[str] , *UpperCamelCase__ : List[Any] , **UpperCamelCase__ : List[Any] ) -> Dict: """simple docstring""" return super().__call__(*UpperCamelCase__ , **UpperCamelCase__ )
76
0
import importlib.util import json import os import warnings from dataclasses import dataclass, field import torch from ..training_args import TrainingArguments from ..utils import cached_property, is_sagemaker_dp_enabled, logging __lowerCAmelCase : Optional[int] = logging.get_logger(__name__) def a__ ( ): '''simple docstring''' __magic_name__ = os.getenv("""SM_HP_MP_PARAMETERS""", """{}""" ) try: # Parse it and check the field "partitions" is included, it is required for model parallel. __magic_name__ = json.loads(__snake_case ) if "partitions" not in smp_options: return False except json.JSONDecodeError: return False # Get the sagemaker specific framework parameters from mpi_options variable. __magic_name__ = os.getenv("""SM_FRAMEWORK_PARAMS""", """{}""" ) try: # Parse it and check the field "sagemaker_distributed_dataparallel_enabled". __magic_name__ = json.loads(__snake_case ) if not mpi_options.get("""sagemaker_mpi_enabled""", __snake_case ): return False except json.JSONDecodeError: return False # Lastly, check if the `smdistributed` module is present. return importlib.util.find_spec("""smdistributed""" ) is not None if is_sagemaker_model_parallel_available(): import smdistributed.modelparallel.torch as smp smp.init() @dataclass class UpperCAmelCase_ ( _A ): '''simple docstring''' a__ = field( default="""""" , metadata={"""help""": """Used by the SageMaker launcher to send mp-specific args. Ignored in SageMakerTrainer"""} , ) def _lowercase ( self : Union[str, Any] ) -> Any: """simple docstring""" super().__post_init__() warnings.warn( """`SageMakerTrainingArguments` is deprecated and will be removed in v5 of Transformers. You can use """ """`TrainingArguments` instead.""" , UpperCamelCase__ , ) @cached_property def _lowercase ( self : List[Any] ) -> "torch.device": """simple docstring""" logger.info("""PyTorch: setting up devices""" ) if torch.distributed.is_available() and torch.distributed.is_initialized() and self.local_rank == -1: logger.warning( """torch.distributed process group is initialized, but local_rank == -1. """ """In order to use Torch DDP, launch your script with `python -m torch.distributed.launch""" ) if self.no_cuda: __magic_name__ = torch.device("""cpu""" ) __magic_name__ = 0 elif is_sagemaker_model_parallel_available(): __magic_name__ = smp.local_rank() __magic_name__ = torch.device("""cuda""" , UpperCamelCase__ ) __magic_name__ = 1 elif is_sagemaker_dp_enabled(): import smdistributed.dataparallel.torch.torch_smddp # noqa: F401 torch.distributed.init_process_group(backend="""smddp""" , timeout=self.ddp_timeout_delta ) __magic_name__ = int(os.getenv("""SMDATAPARALLEL_LOCAL_RANK""" ) ) __magic_name__ = torch.device("""cuda""" , self.local_rank ) __magic_name__ = 1 elif self.local_rank == -1: # if n_gpu is > 1 we'll use nn.DataParallel. # If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0` # Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will # trigger an error that a device index is missing. Index 0 takes into account the # GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0` # will use the first GPU in that env, i.e. GPU#1 __magic_name__ = torch.device("""cuda:0""" if torch.cuda.is_available() else """cpu""" ) # Sometimes the line in the postinit has not been run before we end up here, so just checking we're not at # the default value. __magic_name__ = torch.cuda.device_count() else: # Here, we'll use torch.distributed. # Initializes the distributed backend which will take care of synchronizing nodes/GPUs if not torch.distributed.is_initialized(): torch.distributed.init_process_group(backend="""nccl""" , timeout=self.ddp_timeout_delta ) __magic_name__ = torch.device("""cuda""" , self.local_rank ) __magic_name__ = 1 if device.type == "cuda": torch.cuda.set_device(UpperCamelCase__ ) return device @property def _lowercase ( self : Dict ) -> List[str]: """simple docstring""" if is_sagemaker_model_parallel_available(): return smp.dp_size() return super().world_size @property def _lowercase ( self : int ) -> Tuple: """simple docstring""" return not is_sagemaker_model_parallel_available() @property def _lowercase ( self : Optional[Any] ) -> Optional[int]: """simple docstring""" return False
713
import argparse import math import os from copy import deepcopy import torch from audio_diffusion.models import DiffusionAttnUnetaD from diffusion import sampling from torch import nn from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel __lowerCAmelCase : str = { 'gwf-440k': { 'url': 'https://model-server.zqevans2.workers.dev/gwf-440k.ckpt', 'sample_rate': 48000, 'sample_size': 65536, }, 'jmann-small-190k': { 'url': 'https://model-server.zqevans2.workers.dev/jmann-small-190k.ckpt', 'sample_rate': 48000, 'sample_size': 65536, }, 'jmann-large-580k': { 'url': 'https://model-server.zqevans2.workers.dev/jmann-large-580k.ckpt', 'sample_rate': 48000, 'sample_size': 131072, }, 'maestro-uncond-150k': { 'url': 'https://model-server.zqevans2.workers.dev/maestro-uncond-150k.ckpt', 'sample_rate': 16000, 'sample_size': 65536, }, 'unlocked-uncond-250k': { 'url': 'https://model-server.zqevans2.workers.dev/unlocked-uncond-250k.ckpt', 'sample_rate': 16000, 'sample_size': 65536, }, 'honk-140k': { 'url': 'https://model-server.zqevans2.workers.dev/honk-140k.ckpt', 'sample_rate': 16000, 'sample_size': 65536, }, } def a__ ( A_, A_ ): '''simple docstring''' return torch.atana(A_, A_ ) / math.pi * 2 def a__ ( A_ ): '''simple docstring''' __magic_name__ = torch.sin(t * math.pi / 2 ) ** 2 __magic_name__ = (1 - sigma**2) ** 0.5 return alpha_sigma_to_t(A_, A_ ) class UpperCAmelCase_ ( _A ): '''simple docstring''' pass class UpperCAmelCase_ ( nn.Module ): '''simple docstring''' def __init__( self : Tuple , UpperCamelCase__ : str ) -> Optional[Any]: """simple docstring""" super().__init__() __magic_name__ = DiffusionAttnUnetaD(UpperCamelCase__ , n_attn_layers=4 ) __magic_name__ = deepcopy(self.diffusion ) __magic_name__ = torch.quasirandom.SobolEngine(1 , scramble=UpperCamelCase__ ) def a__ ( A_ ): '''simple docstring''' __magic_name__ = MODELS_MAP[model_name]["""url"""] os.system(f'''wget {url} ./''' ) return f'''./{model_name}.ckpt''' __lowerCAmelCase : Optional[int] = { '1': 'resnets.0', '2': 'attentions.0', '3': 'resnets.1', '4': 'attentions.1', '5': 'resnets.2', '6': 'attentions.2', } __lowerCAmelCase : Optional[Any] = { '8': 'resnets.0', '9': 'attentions.0', '10': 'resnets.1', '11': 'attentions.1', '12': 'resnets.2', '13': 'attentions.2', } __lowerCAmelCase : Union[str, Any] = { '1': 'resnets.0', '2': 'attentions.0', '3': 'resnets.1', '4': 'attentions.1', '5': 'resnets.2', '6': 'attentions.2', '8': 'resnets.3', '9': 'attentions.3', '10': 'resnets.4', '11': 'attentions.4', '12': 'resnets.5', '13': 'attentions.5', } __lowerCAmelCase : int = { '0': 'resnets.0', '1': 'resnets.1', '2': 'resnets.2', '4': 'resnets.0', '5': 'resnets.1', '6': 'resnets.2', } __lowerCAmelCase : List[str] = { 'skip': 'conv_skip', 'main.0': 'conv_1', 'main.1': 'group_norm_1', 'main.3': 'conv_2', 'main.4': 'group_norm_2', } __lowerCAmelCase : int = { 'norm': 'group_norm', 'qkv_proj': ['query', 'key', 'value'], 'out_proj': ['proj_attn'], } def a__ ( A_ ): '''simple docstring''' if name.startswith("""skip""" ): return name.replace("""skip""", RES_CONV_MAP["""skip"""] ) # name has to be of format main.{digit} if not name.startswith("""main.""" ): raise ValueError(f'''ResConvBlock error with {name}''' ) return name.replace(name[:6], RES_CONV_MAP[name[:6]] ) def a__ ( A_ ): '''simple docstring''' for key, value in ATTN_MAP.items(): if name.startswith(A_ ) and not isinstance(A_, A_ ): return name.replace(A_, A_ ) elif name.startswith(A_ ): return [name.replace(A_, A_ ) for v in value] raise ValueError(f'''Attn error with {name}''' ) def a__ ( A_, A_=13 ): '''simple docstring''' __magic_name__ = input_string if string.split(""".""" )[0] == "timestep_embed": return string.replace("""timestep_embed""", """time_proj""" ) __magic_name__ = 0 if string.startswith("""net.3.""" ): depth += 1 __magic_name__ = string[6:] elif string.startswith("""net.""" ): __magic_name__ = string[4:] while string.startswith("""main.7.""" ): depth += 1 __magic_name__ = string[7:] if string.startswith("""main.""" ): __magic_name__ = string[5:] # mid block if string[:2].isdigit(): __magic_name__ = string[:2] __magic_name__ = string[2:] else: __magic_name__ = string[0] __magic_name__ = string[1:] if depth == max_depth: __magic_name__ = MID_NUM_TO_LAYER[layer_num] __magic_name__ = """mid_block""" elif depth > 0 and int(A_ ) < 7: __magic_name__ = DOWN_NUM_TO_LAYER[layer_num] __magic_name__ = f'''down_blocks.{depth}''' elif depth > 0 and int(A_ ) > 7: __magic_name__ = UP_NUM_TO_LAYER[layer_num] __magic_name__ = f'''up_blocks.{max_depth - depth - 1}''' elif depth == 0: __magic_name__ = DEPTH_0_TO_LAYER[layer_num] __magic_name__ = f'''up_blocks.{max_depth - 1}''' if int(A_ ) > 3 else """down_blocks.0""" if not string_left.startswith(""".""" ): raise ValueError(f'''Naming error with {input_string} and string_left: {string_left}.''' ) __magic_name__ = string_left[1:] if "resnets" in new_layer: __magic_name__ = convert_resconv_naming(A_ ) elif "attentions" in new_layer: __magic_name__ = convert_attn_naming(A_ ) __magic_name__ = new_string_left if not isinstance(A_, A_ ): __magic_name__ = prefix + """.""" + new_layer + """.""" + string_left else: __magic_name__ = [prefix + """.""" + new_layer + """.""" + s for s in string_left] return new_string def a__ ( A_ ): '''simple docstring''' __magic_name__ = {} for k, v in state_dict.items(): if k.endswith("""kernel""" ): # up- and downsample layers, don't have trainable weights continue __magic_name__ = rename(A_ ) # check if we need to transform from Conv => Linear for attention if isinstance(A_, A_ ): __magic_name__ = transform_conv_attns(A_, A_, A_ ) else: __magic_name__ = v return new_state_dict def a__ ( A_, A_, A_ ): '''simple docstring''' if len(A_ ) == 1: if len(v.shape ) == 3: # weight __magic_name__ = v[:, :, 0] else: # bias __magic_name__ = v else: # qkv matrices __magic_name__ = v.shape[0] __magic_name__ = trippled_shape // 3 for i in range(3 ): if len(v.shape ) == 3: __magic_name__ = v[i * single_shape : (i + 1) * single_shape, :, 0] else: __magic_name__ = v[i * single_shape : (i + 1) * single_shape] return new_state_dict def a__ ( A_ ): '''simple docstring''' __magic_name__ = torch.device("""cuda""" if torch.cuda.is_available() else """cpu""" ) __magic_name__ = args.model_path.split("""/""" )[-1].split(""".""" )[0] if not os.path.isfile(args.model_path ): assert ( model_name == args.model_path ), f'''Make sure to provide one of the official model names {MODELS_MAP.keys()}''' __magic_name__ = download(A_ ) __magic_name__ = MODELS_MAP[model_name]["""sample_rate"""] __magic_name__ = MODELS_MAP[model_name]["""sample_size"""] __magic_name__ = Object() __magic_name__ = sample_size __magic_name__ = sample_rate __magic_name__ = 0 __magic_name__ = UNetaDModel(sample_size=A_, sample_rate=A_ ) __magic_name__ = diffusers_model.state_dict() __magic_name__ = DiffusionUncond(A_ ) orig_model.load_state_dict(torch.load(args.model_path, map_location=A_ )["""state_dict"""] ) __magic_name__ = orig_model.diffusion_ema.eval() __magic_name__ = orig_model.state_dict() __magic_name__ = rename_orig_weights(A_ ) __magic_name__ = set(renamed_state_dict.keys() ) - set(diffusers_state_dict.keys() ) __magic_name__ = set(diffusers_state_dict.keys() ) - set(renamed_state_dict.keys() ) assert len(A_ ) == 0, f'''Problem with {renamed_minus_diffusers}''' assert all(k.endswith("""kernel""" ) for k in list(A_ ) ), f'''Problem with {diffusers_minus_renamed}''' for key, value in renamed_state_dict.items(): assert ( diffusers_state_dict[key].squeeze().shape == value.squeeze().shape ), f'''Shape for {key} doesn\'t match. Diffusers: {diffusers_state_dict[key].shape} vs. {value.shape}''' if key == "time_proj.weight": __magic_name__ = value.squeeze() __magic_name__ = value diffusers_model.load_state_dict(A_ ) __magic_name__ = 100 __magic_name__ = 33 __magic_name__ = IPNDMScheduler(num_train_timesteps=A_ ) __magic_name__ = torch.manual_seed(A_ ) __magic_name__ = torch.randn([1, 2, config.sample_size], generator=A_ ).to(A_ ) __magic_name__ = torch.linspace(1, 0, steps + 1, device=A_ )[:-1] __magic_name__ = get_crash_schedule(A_ ) __magic_name__ = DanceDiffusionPipeline(unet=A_, scheduler=A_ ) __magic_name__ = torch.manual_seed(33 ) __magic_name__ = pipe(num_inference_steps=A_, generator=A_ ).audios __magic_name__ = sampling.iplms_sample(A_, A_, A_, {} ) __magic_name__ = generated.clamp(-1, 1 ) __magic_name__ = (generated - audio).abs().sum() __magic_name__ = (generated - audio).abs().max() if args.save: pipe.save_pretrained(args.checkpoint_path ) print("""Diff sum""", A_ ) print("""Diff max""", A_ ) assert diff_max < 1e-3, f'''Diff max: {diff_max} is too much :-/''' print(f'''Conversion for {model_name} successful!''' ) if __name__ == "__main__": __lowerCAmelCase : Optional[int] = argparse.ArgumentParser() parser.add_argument('--model_path', default=None, type=str, required=True, help='Path to the model to convert.') parser.add_argument( '--save', default=True, type=bool, required=False, help='Whether to save the converted model or not.' ) parser.add_argument('--checkpoint_path', default=None, type=str, required=True, help='Path to the output model.') __lowerCAmelCase : Union[str, Any] = parser.parse_args() main(args)
76
0
import torch from ..models.speechta import SpeechTaForTextToSpeech, SpeechTaHifiGan, SpeechTaProcessor from ..utils import is_datasets_available from .base import PipelineTool if is_datasets_available(): from datasets import load_dataset class UpperCAmelCase_ ( __lowercase ): '''simple docstring''' a__ = 'microsoft/speecht5_tts' a__ = ( 'This is a tool that reads an English text out loud. It takes an input named `text` which should contain the ' 'text to read (in English) and returns a waveform object containing the sound.' ) a__ = 'text_reader' a__ = SpeechTaProcessor a__ = SpeechTaForTextToSpeech a__ = SpeechTaHifiGan a__ = ['text'] a__ = ['audio'] def _lowercase ( self : str ) -> Dict: """simple docstring""" if self.post_processor is None: __magic_name__ = """microsoft/speecht5_hifigan""" super().setup() def _lowercase ( self : Union[str, Any] , UpperCamelCase__ : str , UpperCamelCase__ : str=None ) -> str: """simple docstring""" __magic_name__ = self.pre_processor(text=__A , return_tensors="""pt""" , truncation=__A ) if speaker_embeddings is None: if not is_datasets_available(): raise ImportError("""Datasets needs to be installed if not passing speaker embeddings.""" ) __magic_name__ = load_dataset("""Matthijs/cmu-arctic-xvectors""" , split="""validation""" ) __magic_name__ = torch.tensor(embeddings_dataset[7305]["""xvector"""] ).unsqueeze(0 ) return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings} def _lowercase ( self : Tuple , UpperCamelCase__ : Any ) -> Any: """simple docstring""" with torch.no_grad(): return self.model.generate_speech(**__A ) def _lowercase ( self : int , UpperCamelCase__ : Dict ) -> Dict: """simple docstring""" with torch.no_grad(): return self.post_processor(__A ).cpu().detach()
714
from ...configuration_utils import PretrainedConfig from ...utils import logging __lowerCAmelCase : Tuple = logging.get_logger(__name__) __lowerCAmelCase : Tuple = { 'SCUT-DLVCLab/lilt-roberta-en-base': ( 'https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json' ), } class UpperCAmelCase_ ( _A ): '''simple docstring''' a__ = """lilt""" def __init__( self : Dict , UpperCamelCase__ : List[str]=3_0522 , UpperCamelCase__ : Optional[Any]=768 , UpperCamelCase__ : Dict=12 , UpperCamelCase__ : Optional[Any]=12 , UpperCamelCase__ : Dict=3072 , UpperCamelCase__ : Tuple="gelu" , UpperCamelCase__ : int=0.1 , UpperCamelCase__ : Dict=0.1 , UpperCamelCase__ : Union[str, Any]=512 , UpperCamelCase__ : Dict=2 , UpperCamelCase__ : Dict=0.02 , UpperCamelCase__ : Any=1E-12 , UpperCamelCase__ : Optional[int]=0 , UpperCamelCase__ : str="absolute" , UpperCamelCase__ : Any=None , UpperCamelCase__ : Dict=4 , UpperCamelCase__ : Tuple=1024 , **UpperCamelCase__ : Optional[int] , ) -> Dict: """simple docstring""" super().__init__(pad_token_id=UpperCamelCase__ , **UpperCamelCase__ ) __magic_name__ = vocab_size __magic_name__ = hidden_size __magic_name__ = num_hidden_layers __magic_name__ = num_attention_heads __magic_name__ = hidden_act __magic_name__ = intermediate_size __magic_name__ = hidden_dropout_prob __magic_name__ = attention_probs_dropout_prob __magic_name__ = max_position_embeddings __magic_name__ = type_vocab_size __magic_name__ = initializer_range __magic_name__ = layer_norm_eps __magic_name__ = position_embedding_type __magic_name__ = classifier_dropout __magic_name__ = channel_shrink_ratio __magic_name__ = max_ad_position_embeddings
76
0
import math import numpy as np import qiskit from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute def a__ ( A_ = 3 ): '''simple docstring''' if isinstance(UpperCamelCase__, UpperCamelCase__ ): raise TypeError("""number of qubits must be a integer.""" ) if number_of_qubits <= 0: raise ValueError("""number of qubits must be > 0.""" ) if math.floor(UpperCamelCase__ ) != number_of_qubits: raise ValueError("""number of qubits must be exact integer.""" ) if number_of_qubits > 10: raise ValueError("""number of qubits too large to simulate(>10).""" ) __magic_name__ = QuantumRegister(UpperCamelCase__, """qr""" ) __magic_name__ = ClassicalRegister(UpperCamelCase__, """cr""" ) __magic_name__ = QuantumCircuit(UpperCamelCase__, UpperCamelCase__ ) __magic_name__ = number_of_qubits for i in range(UpperCamelCase__ ): quantum_circuit.h(number_of_qubits - i - 1 ) counter -= 1 for j in range(UpperCamelCase__ ): quantum_circuit.cp(np.pi / 2 ** (counter - j), UpperCamelCase__, UpperCamelCase__ ) for k in range(number_of_qubits // 2 ): quantum_circuit.swap(UpperCamelCase__, number_of_qubits - k - 1 ) # measure all the qubits quantum_circuit.measure(UpperCamelCase__, UpperCamelCase__ ) # simulate with 10000 shots __magic_name__ = Aer.get_backend("""qasm_simulator""" ) __magic_name__ = execute(UpperCamelCase__, UpperCamelCase__, shots=10000 ) return job.result().get_counts(UpperCamelCase__ ) if __name__ == "__main__": print( F'''Total count for quantum fourier transform state is: \ {quantum_fourier_transform(3)}''' )
715
import json import os import tempfile from transformers.testing_utils import check_json_file_has_correct_format class UpperCAmelCase_ : '''simple docstring''' a__ = None def _lowercase ( self : Optional[int] ) -> str: """simple docstring""" __magic_name__ = self.feature_extraction_class(**self.feat_extract_dict ) __magic_name__ = json.loads(feat_extract.to_json_string() ) for key, value in self.feat_extract_dict.items(): self.assertEqual(obj[key] , UpperCamelCase__ ) def _lowercase ( self : Union[str, Any] ) -> str: """simple docstring""" __magic_name__ = self.feature_extraction_class(**self.feat_extract_dict ) with tempfile.TemporaryDirectory() as tmpdirname: __magic_name__ = os.path.join(UpperCamelCase__ , """feat_extract.json""" ) feat_extract_first.to_json_file(UpperCamelCase__ ) __magic_name__ = self.feature_extraction_class.from_json_file(UpperCamelCase__ ) self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() ) def _lowercase ( self : str ) -> str: """simple docstring""" __magic_name__ = self.feature_extraction_class(**self.feat_extract_dict ) with tempfile.TemporaryDirectory() as tmpdirname: __magic_name__ = feat_extract_first.save_pretrained(UpperCamelCase__ )[0] check_json_file_has_correct_format(UpperCamelCase__ ) __magic_name__ = self.feature_extraction_class.from_pretrained(UpperCamelCase__ ) self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() ) def _lowercase ( self : Optional[int] ) -> Tuple: """simple docstring""" __magic_name__ = self.feature_extraction_class() self.assertIsNotNone(UpperCamelCase__ )
76
0
from __future__ import annotations def a__ ( A_, A_, A_, A_, A_, ): '''simple docstring''' __magic_name__ = len(_UpperCAmelCase ) # If row is equal to the size of the board it means there are a queen in each row in # the current board (possible_board) if row == n: # We convert the variable possible_board that looks like this: [1, 3, 0, 2] to # this: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . '] boards.append([""". """ * i + """Q """ + """. """ * (n - 1 - i) for i in possible_board] ) return # We iterate each column in the row to find all possible results in each row for col in range(_UpperCAmelCase ): # We apply that we learned previously. First we check that in the current board # (possible_board) there are not other same value because if there is it means # that there are a collision in vertical. Then we apply the two formulas we # learned before: # # 45º: y - x = b or 45: row - col = b # 135º: y + x = b or row + col = b. # # And we verify if the results of this two formulas not exist in their variables # respectively. (diagonal_right_collisions, diagonal_left_collisions) # # If any or these are True it means there is a collision so we continue to the # next value in the for loop. if ( col in possible_board or row - col in diagonal_right_collisions or row + col in diagonal_left_collisions ): continue # If it is False we call dfs function again and we update the inputs depth_first_search( [*possible_board, col], [*diagonal_right_collisions, row - col], [*diagonal_left_collisions, row + col], _UpperCAmelCase, _UpperCAmelCase, ) def a__ ( A_ ): '''simple docstring''' __magic_name__ = [] depth_first_search([], [], [], _UpperCAmelCase, _UpperCAmelCase ) # Print all the boards for board in boards: for column in board: print(_UpperCAmelCase ) print("""""" ) print(len(_UpperCAmelCase ), """solutions were found.""" ) if __name__ == "__main__": import doctest doctest.testmod() n_queens_solution(4)
716
from ..utils import DummyObject, requires_backends class UpperCAmelCase_ ( metaclass=_A ): '''simple docstring''' a__ = ["""note_seq"""] def __init__( self : Any , *UpperCamelCase__ : str , **UpperCamelCase__ : List[Any] ) -> Optional[int]: """simple docstring""" requires_backends(self , ["""note_seq"""] ) @classmethod def _lowercase ( cls : str , *UpperCamelCase__ : Dict , **UpperCamelCase__ : Tuple ) -> Dict: """simple docstring""" requires_backends(cls , ["""note_seq"""] ) @classmethod def _lowercase ( cls : List[str] , *UpperCamelCase__ : Optional[Any] , **UpperCamelCase__ : Tuple ) -> int: """simple docstring""" requires_backends(cls , ["""note_seq"""] )
76
0
import json import os import shutil import warnings from argparse import ArgumentParser, Namespace from pathlib import Path from typing import List from ..utils import logging from . import BaseTransformersCLICommand try: from cookiecutter.main import cookiecutter __lowerCAmelCase : Optional[int] = True except ImportError: __lowerCAmelCase : int = False __lowerCAmelCase : int = logging.get_logger(__name__) # pylint: disable=invalid-name def a__ ( A_ ) -> str: '''simple docstring''' return AddNewModelCommand(args.testing, args.testing_file, path=args.path ) class UpperCAmelCase_ ( UpperCamelCase_ ): '''simple docstring''' @staticmethod def _lowercase ( UpperCamelCase__ : Union[str, Any] ) -> Union[str, Any]: """simple docstring""" __magic_name__ = parser.add_parser("""add-new-model""" ) add_new_model_parser.add_argument("""--testing""" , action="""store_true""" , help="""If in testing mode.""" ) add_new_model_parser.add_argument("""--testing_file""" , type=__a , help="""Configuration file on which to run.""" ) add_new_model_parser.add_argument( """--path""" , type=__a , help="""Path to cookiecutter. Should only be used for testing purposes.""" ) add_new_model_parser.set_defaults(func=__a ) def __init__( self : List[Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : int , UpperCamelCase__ : int=None , *UpperCamelCase__ : Optional[int] ) -> int: """simple docstring""" __magic_name__ = testing __magic_name__ = testing_file __magic_name__ = path def _lowercase ( self : Optional[int] ) -> Any: """simple docstring""" warnings.warn( """The command `transformers-cli add-new-model` is deprecated and will be removed in v5 of Transformers. """ """It is not actively maintained anymore, so might give a result that won\'t pass all tests and quality """ """checks, you should use `transformers-cli add-new-model-like` instead.""" ) if not _has_cookiecutter: raise ImportError( """Model creation dependencies are required to use the `add_new_model` command. Install them by running """ """the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n""" ) # Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory __magic_name__ = [directory for directory in os.listdir() if 'cookiecutter-template-' == directory[:22]] if len(__a ) > 0: raise ValueError( """Several directories starting with `cookiecutter-template-` in current working directory. """ """Please clean your directory by removing all folders starting with `cookiecutter-template-` or """ """change your working directory.""" ) __magic_name__ = ( Path(__a ).parent.parent.parent.parent if self._path is None else Path(self._path ).parent.parent ) __magic_name__ = path_to_transformer_root / 'templates' / 'adding_a_new_model' # Execute cookiecutter if not self._testing: cookiecutter(str(__a ) ) else: with open(self._testing_file , """r""" ) as configuration_file: __magic_name__ = json.load(__a ) cookiecutter( str(path_to_cookiecutter if self._path is None else self._path ) , no_input=__a , extra_context=__a , ) __magic_name__ = [directory for directory in os.listdir() if 'cookiecutter-template-' in directory[:22]][0] # Retrieve configuration with open(directory + """/configuration.json""" , """r""" ) as configuration_file: __magic_name__ = json.load(__a ) __magic_name__ = configuration['lowercase_modelname'] __magic_name__ = configuration['generate_tensorflow_pytorch_and_flax'] os.remove(F'''{directory}/configuration.json''' ) __magic_name__ = 'PyTorch' in generate_tensorflow_pytorch_and_flax __magic_name__ = 'TensorFlow' in generate_tensorflow_pytorch_and_flax __magic_name__ = 'Flax' in generate_tensorflow_pytorch_and_flax __magic_name__ = F'''{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}''' os.makedirs(__a , exist_ok=__a ) os.makedirs(F'''{path_to_transformer_root}/tests/models/{lowercase_model_name}''' , exist_ok=__a ) # Tests require submodules as they have parent imports with open(F'''{path_to_transformer_root}/tests/models/{lowercase_model_name}/__init__.py''' , """w""" ): pass shutil.move( F'''{directory}/__init__.py''' , F'''{model_dir}/__init__.py''' , ) shutil.move( F'''{directory}/configuration_{lowercase_model_name}.py''' , F'''{model_dir}/configuration_{lowercase_model_name}.py''' , ) def remove_copy_lines(UpperCamelCase__ : Any ): with open(__a , """r""" ) as f: __magic_name__ = f.readlines() with open(__a , """w""" ) as f: for line in lines: if "# Copied from transformers." not in line: f.write(__a ) if output_pytorch: if not self._testing: remove_copy_lines(F'''{directory}/modeling_{lowercase_model_name}.py''' ) shutil.move( F'''{directory}/modeling_{lowercase_model_name}.py''' , F'''{model_dir}/modeling_{lowercase_model_name}.py''' , ) shutil.move( F'''{directory}/test_modeling_{lowercase_model_name}.py''' , F'''{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_{lowercase_model_name}.py''' , ) else: os.remove(F'''{directory}/modeling_{lowercase_model_name}.py''' ) os.remove(F'''{directory}/test_modeling_{lowercase_model_name}.py''' ) if output_tensorflow: if not self._testing: remove_copy_lines(F'''{directory}/modeling_tf_{lowercase_model_name}.py''' ) shutil.move( F'''{directory}/modeling_tf_{lowercase_model_name}.py''' , F'''{model_dir}/modeling_tf_{lowercase_model_name}.py''' , ) shutil.move( F'''{directory}/test_modeling_tf_{lowercase_model_name}.py''' , F'''{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_tf_{lowercase_model_name}.py''' , ) else: os.remove(F'''{directory}/modeling_tf_{lowercase_model_name}.py''' ) os.remove(F'''{directory}/test_modeling_tf_{lowercase_model_name}.py''' ) if output_flax: if not self._testing: remove_copy_lines(F'''{directory}/modeling_flax_{lowercase_model_name}.py''' ) shutil.move( F'''{directory}/modeling_flax_{lowercase_model_name}.py''' , F'''{model_dir}/modeling_flax_{lowercase_model_name}.py''' , ) shutil.move( F'''{directory}/test_modeling_flax_{lowercase_model_name}.py''' , F'''{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_flax_{lowercase_model_name}.py''' , ) else: os.remove(F'''{directory}/modeling_flax_{lowercase_model_name}.py''' ) os.remove(F'''{directory}/test_modeling_flax_{lowercase_model_name}.py''' ) shutil.move( F'''{directory}/{lowercase_model_name}.md''' , F'''{path_to_transformer_root}/docs/source/en/model_doc/{lowercase_model_name}.md''' , ) shutil.move( F'''{directory}/tokenization_{lowercase_model_name}.py''' , F'''{model_dir}/tokenization_{lowercase_model_name}.py''' , ) shutil.move( F'''{directory}/tokenization_fast_{lowercase_model_name}.py''' , F'''{model_dir}/tokenization_{lowercase_model_name}_fast.py''' , ) from os import fdopen, remove from shutil import copymode, move from tempfile import mkstemp def replace(UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Optional[Any] ): # Create temp file __magic_name__ = mkstemp() __magic_name__ = False with fdopen(__a , """w""" ) as new_file: with open(__a ) as old_file: for line in old_file: new_file.write(__a ) if line_to_copy_below in line: __magic_name__ = True for line_to_copy in lines_to_copy: new_file.write(__a ) if not line_found: raise ValueError(F'''Line {line_to_copy_below} was not found in file.''' ) # Copy the file permissions from the old file to the new file copymode(__a , __a ) # Remove original file remove(__a ) # Move new file move(__a , __a ) def skip_units(UpperCamelCase__ : Optional[Any] ): return ( ("generating PyTorch" in line and not output_pytorch) or ("generating TensorFlow" in line and not output_tensorflow) or ("generating Flax" in line and not output_flax) ) def replace_in_files(UpperCamelCase__ : Tuple ): with open(__a ) as datafile: __magic_name__ = [] __magic_name__ = False __magic_name__ = False for line in datafile: if "# To replace in: " in line and "##" not in line: __magic_name__ = line.split("""\"""" )[1] __magic_name__ = skip_units(__a ) elif "# Below: " in line and "##" not in line: __magic_name__ = line.split("""\"""" )[1] __magic_name__ = skip_units(__a ) elif "# End." in line and "##" not in line: if not skip_file and not skip_snippet: replace(__a , __a , __a ) __magic_name__ = [] elif "# Replace with" in line and "##" not in line: __magic_name__ = [] elif "##" not in line: lines_to_copy.append(__a ) remove(__a ) replace_in_files(F'''{directory}/to_replace_{lowercase_model_name}.py''' ) os.rmdir(__a )
717
def a__ ( A_ ): '''simple docstring''' return " ".join( """""".join(word[::-1] ) if len(A_ ) > 4 else word for word in sentence.split() ) if __name__ == "__main__": import doctest doctest.testmod() print(reverse_long_words('Hey wollef sroirraw'))
76
0
import copy from typing import Dict, Optional from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto import CONFIG_MAPPING from ..detr import DetrConfig from ..swin import SwinConfig __lowerCAmelCase : str = { 'facebook/maskformer-swin-base-ade': ( 'https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json' ) # See all MaskFormer models at https://huggingface.co/models?filter=maskformer } __lowerCAmelCase : Any = logging.get_logger(__name__) class UpperCAmelCase_ ( _A ): '''simple docstring''' a__ = """maskformer""" a__ = {"""hidden_size""": """mask_feature_size"""} a__ = ["""resnet""", """swin"""] a__ = ["""detr"""] def __init__( self : int , UpperCamelCase__ : int = 256 , UpperCamelCase__ : int = 256 , UpperCamelCase__ : float = 0.1 , UpperCamelCase__ : bool = False , UpperCamelCase__ : Optional[Dict] = None , UpperCamelCase__ : Optional[Dict] = None , UpperCamelCase__ : float = 0.02 , UpperCamelCase__ : float = 1.0 , UpperCamelCase__ : float = 1.0 , UpperCamelCase__ : float = 1.0 , UpperCamelCase__ : float = 20.0 , UpperCamelCase__ : Optional[bool] = None , **UpperCamelCase__ : Optional[Any] , ) -> List[Any]: """simple docstring""" if backbone_config is None: # fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k __magic_name__ = SwinConfig( image_size=384 , in_channels=3 , patch_size=4 , embed_dim=128 , depths=[2, 2, 18, 2] , num_heads=[4, 8, 16, 32] , window_size=12 , drop_path_rate=0.3 , out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] , ) if isinstance(__A , __A ): __magic_name__ = backbone_config.pop("""model_type""" ) __magic_name__ = CONFIG_MAPPING[backbone_model_type] __magic_name__ = config_class.from_dict(__A ) # verify that the backbone is supported if backbone_config.model_type not in self.backbones_supported: logger.warning_once( F'''Backbone {backbone_config.model_type} is not a supported model and may not be compatible with MaskFormer. ''' F'''Supported model types: {','.join(self.backbones_supported )}''' ) if decoder_config is None: # fall back to https://huggingface.co/facebook/detr-resnet-50 __magic_name__ = DetrConfig() else: # verify that the decoder is supported __magic_name__ = ( decoder_config.pop("""model_type""" ) if isinstance(__A , __A ) else decoder_config.model_type ) if decoder_type not in self.decoders_supported: raise ValueError( F'''Transformer Decoder {decoder_type} not supported, please use one of''' F''' {','.join(self.decoders_supported )}''' ) if isinstance(__A , __A ): __magic_name__ = CONFIG_MAPPING[decoder_type] __magic_name__ = config_class.from_dict(__A ) __magic_name__ = backbone_config __magic_name__ = decoder_config # main feature dimension for the model __magic_name__ = fpn_feature_size __magic_name__ = mask_feature_size # initializer __magic_name__ = init_std __magic_name__ = init_xavier_std # Hungarian matcher && loss __magic_name__ = cross_entropy_weight __magic_name__ = dice_weight __magic_name__ = mask_weight __magic_name__ = use_auxiliary_loss __magic_name__ = no_object_weight __magic_name__ = output_auxiliary_logits __magic_name__ = self.decoder_config.encoder_attention_heads __magic_name__ = self.decoder_config.num_hidden_layers super().__init__(**__A ) @classmethod def _lowercase ( cls : Dict , UpperCamelCase__ : PretrainedConfig , UpperCamelCase__ : PretrainedConfig , **UpperCamelCase__ : Any ) -> int: """simple docstring""" return cls( backbone_config=__A , decoder_config=__A , **__A , ) def _lowercase ( self : int ) -> Dict[str, any]: """simple docstring""" __magic_name__ = copy.deepcopy(self.__dict__ ) __magic_name__ = self.backbone_config.to_dict() __magic_name__ = self.decoder_config.to_dict() __magic_name__ = self.__class__.model_type return output
718
import os import unittest from transformers import FunnelTokenizer, FunnelTokenizerFast from transformers.models.funnel.tokenization_funnel import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class UpperCAmelCase_ ( _A , unittest.TestCase ): '''simple docstring''' a__ = FunnelTokenizer a__ = FunnelTokenizerFast a__ = True a__ = True def _lowercase ( self : List[Any] ) -> str: """simple docstring""" super().setUp() __magic_name__ = [ """<unk>""", """<cls>""", """<sep>""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing""", """,""", """low""", """lowest""", ] __magic_name__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] ) with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer: vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) ) def _lowercase ( self : Dict , **UpperCamelCase__ : Tuple ) -> Union[str, Any]: """simple docstring""" return FunnelTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase__ ) def _lowercase ( self : str , **UpperCamelCase__ : str ) -> List[str]: """simple docstring""" return FunnelTokenizerFast.from_pretrained(self.tmpdirname , **UpperCamelCase__ ) def _lowercase ( self : List[str] , UpperCamelCase__ : str ) -> List[Any]: """simple docstring""" __magic_name__ = """UNwant\u00E9d,running""" __magic_name__ = """unwanted, running""" return input_text, output_text def _lowercase ( self : Union[str, Any] ) -> Tuple: """simple docstring""" __magic_name__ = self.tokenizer_class(self.vocab_file ) __magic_name__ = tokenizer.tokenize("""UNwant\u00E9d,running""" ) self.assertListEqual(UpperCamelCase__ , ["""un""", """##want""", """##ed""", """,""", """runn""", """##ing"""] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase__ ) , [7, 4, 5, 10, 8, 9] ) def _lowercase ( self : str ) -> List[Any]: """simple docstring""" __magic_name__ = self.get_tokenizers(do_lower_case=UpperCamelCase__ ) for tokenizer in tokenizers: __magic_name__ = tokenizer("""UNwant\u00E9d,running""" ) __magic_name__ = len(inputs["""input_ids"""] ) - 1 self.assertListEqual(inputs["""token_type_ids"""] , [2] + [0] * sentence_len ) __magic_name__ = tokenizer("""UNwant\u00E9d,running""" , """UNwant\u00E9d,running""" ) self.assertListEqual(inputs["""token_type_ids"""] , [2] + [0] * sentence_len + [1] * sentence_len )
76
0
import os from itertools import chain from random import randrange, shuffle import pytest from .sola import PokerHand __lowerCAmelCase : Optional[int] = ( '4S 3H 2C 7S 5H', '9D 8H 2C 6S 7H', '2D 6D 9D TH 7D', 'TC 8C 2S JH 6C', 'JH 8S TH AH QH', 'TS KS 5S 9S AC', 'KD 6S 9D TH AD', 'KS 8D 4D 9S 4S', # pair '8C 4S KH JS 4D', # pair 'QH 8H KD JH 8S', # pair 'KC 4H KS 2H 8D', # pair 'KD 4S KC 3H 8S', # pair 'AH 8S AS KC JH', # pair '3H 4C 4H 3S 2H', # 2 pairs '5S 5D 2C KH KH', # 2 pairs '3C KH 5D 5S KH', # 2 pairs 'AS 3C KH AD KH', # 2 pairs '7C 7S 3S 7H 5S', # 3 of a kind '7C 7S KH 2H 7H', # 3 of a kind 'AC KH QH AH AS', # 3 of a kind '2H 4D 3C AS 5S', # straight (low ace) '3C 5C 4C 2C 6H', # straight '6S 8S 7S 5H 9H', # straight 'JS QS 9H TS KH', # straight 'QC KH TS JS AH', # straight (high ace) '8C 9C 5C 3C TC', # flush '3S 8S 9S 5S KS', # flush '4C 5C 9C 8C KC', # flush 'JH 8H AH KH QH', # flush '3D 2H 3H 2C 2D', # full house '2H 2C 3S 3H 3D', # full house 'KH KC 3S 3H 3D', # full house 'JC 6H JS JD JH', # 4 of a kind 'JC 7H JS JD JH', # 4 of a kind 'JC KH JS JD JH', # 4 of a kind '2S AS 4S 5S 3S', # straight flush (low ace) '2D 6D 3D 4D 5D', # straight flush '5C 6C 3C 7C 4C', # straight flush 'JH 9H TH KH QH', # straight flush 'JH AH TH KH QH', # royal flush (high ace straight flush) ) __lowerCAmelCase : Optional[int] = ( ('2H 3H 4H 5H 6H', 'KS AS TS QS JS', 'Loss'), ('2H 3H 4H 5H 6H', 'AS AD AC AH JD', 'Win'), ('AS AH 2H AD AC', 'JS JD JC JH 3D', 'Win'), ('2S AH 2H AS AC', 'JS JD JC JH AD', 'Loss'), ('2S AH 2H AS AC', '2H 3H 5H 6H 7H', 'Win'), ('AS 3S 4S 8S 2S', '2H 3H 5H 6H 7H', 'Win'), ('2H 3H 5H 6H 7H', '2S 3H 4H 5S 6C', 'Win'), ('2S 3H 4H 5S 6C', '3D 4C 5H 6H 2S', 'Tie'), ('2S 3H 4H 5S 6C', 'AH AC 5H 6H AS', 'Win'), ('2S 2H 4H 5S 4C', 'AH AC 5H 6H AS', 'Loss'), ('2S 2H 4H 5S 4C', 'AH AC 5H 6H 7S', 'Win'), ('6S AD 7H 4S AS', 'AH AC 5H 6H 7S', 'Loss'), ('2S AH 4H 5S KC', 'AH AC 5H 6H 7S', 'Loss'), ('2S 3H 6H 7S 9C', '7H 3C TH 6H 9S', 'Loss'), ('4S 5H 6H TS AC', '3S 5H 6H TS AC', 'Win'), ('2S AH 4H 5S 6C', 'AD 4C 5H 6H 2C', 'Tie'), ('AS AH 3H AD AC', 'AS AH 2H AD AC', 'Win'), ('AH AC 5H 5C QS', 'AH AC 5H 5C KS', 'Loss'), ('AH AC 5H 5C QS', 'KH KC 5H 5C QS', 'Win'), ('7C 7S KH 2H 7H', '3C 3S AH 2H 3H', 'Win'), ('3C 3S AH 2H 3H', '7C 7S KH 2H 7H', 'Loss'), ('6H 5H 4H 3H 2H', '5H 4H 3H 2H AH', 'Win'), ('5H 4H 3H 2H AH', '5H 4H 3H 2H AH', 'Tie'), ('5H 4H 3H 2H AH', '6H 5H 4H 3H 2H', 'Loss'), ('AH AD KS KC AC', 'AH KD KH AC KC', 'Win'), ('2H 4D 3C AS 5S', '2H 4D 3C 6S 5S', 'Loss'), ('2H 3S 3C 3H 2S', '3S 3C 2S 2H 2D', 'Win'), ('4D 6D 5D 2D JH', '3S 8S 3H TC KH', 'Loss'), ('4S 6C 8S 3S 7S', 'AD KS 2D 7D 7C', 'Loss'), ('6S 4C 7H 8C 3H', '5H JC AH 9D 9C', 'Loss'), ('9D 9H JH TC QH', '3C 2S JS 5C 7H', 'Win'), ('2H TC 8S AD 9S', '4H TS 7H 2C 5C', 'Win'), ('9D 3S 2C 7S 7C', 'JC TD 3C TC 9H', 'Loss'), ) __lowerCAmelCase : Tuple = ( ('2H 3H 4H 5H 6H', True), ('AS AH 2H AD AC', False), ('2H 3H 5H 6H 7H', True), ('KS AS TS QS JS', True), ('8H 9H QS JS TH', False), ('AS 3S 4S 8S 2S', True), ) __lowerCAmelCase : List[Any] = ( ('2H 3H 4H 5H 6H', True), ('AS AH 2H AD AC', False), ('2H 3H 5H 6H 7H', False), ('KS AS TS QS JS', True), ('8H 9H QS JS TH', True), ) __lowerCAmelCase : Any = ( ('2H 4D 3C AS 5S', True, [5, 4, 3, 2, 14]), ('2H 5D 3C AS 5S', False, [14, 5, 5, 3, 2]), ('JH QD KC AS TS', False, [14, 13, 12, 11, 10]), ('9D 3S 2C 7S 7C', False, [9, 7, 7, 3, 2]), ) __lowerCAmelCase : Dict = ( ('JH AH TH KH QH', 0), ('JH 9H TH KH QH', 0), ('JC KH JS JD JH', 7), ('KH KC 3S 3H 3D', 6), ('8C 9C 5C 3C TC', 0), ('JS QS 9H TS KH', 0), ('7C 7S KH 2H 7H', 3), ('3C KH 5D 5S KH', 2), ('QH 8H KD JH 8S', 1), ('2D 6D 9D TH 7D', 0), ) __lowerCAmelCase : List[str] = ( ('JH AH TH KH QH', 23), ('JH 9H TH KH QH', 22), ('JC KH JS JD JH', 21), ('KH KC 3S 3H 3D', 20), ('8C 9C 5C 3C TC', 19), ('JS QS 9H TS KH', 18), ('7C 7S KH 2H 7H', 17), ('3C KH 5D 5S KH', 16), ('QH 8H KD JH 8S', 15), ('2D 6D 9D TH 7D', 14), ) def a__ ( ): '''simple docstring''' __magic_name__ = randrange(len(_lowerCamelCase ) ), randrange(len(_lowerCamelCase ) ) __magic_name__ = ["""Loss""", """Tie""", """Win"""][(play >= oppo) + (play > oppo)] __magic_name__ = SORTED_HANDS[play], SORTED_HANDS[oppo] return hand, other, expected def a__ ( A_ = 100 ): '''simple docstring''' return (generate_random_hand() for _ in range(_lowerCamelCase )) @pytest.mark.parametrize("""hand, expected""", _lowerCamelCase ) def a__ ( A_, A_ ): '''simple docstring''' assert PokerHand(_lowerCamelCase )._is_flush() == expected @pytest.mark.parametrize("""hand, expected""", _lowerCamelCase ) def a__ ( A_, A_ ): '''simple docstring''' assert PokerHand(_lowerCamelCase )._is_straight() == expected @pytest.mark.parametrize("""hand, expected, card_values""", _lowerCamelCase ) def a__ ( A_, A_, A_ ): '''simple docstring''' __magic_name__ = PokerHand(_lowerCamelCase ) assert player._is_five_high_straight() == expected assert player._card_values == card_values @pytest.mark.parametrize("""hand, expected""", _lowerCamelCase ) def a__ ( A_, A_ ): '''simple docstring''' assert PokerHand(_lowerCamelCase )._is_same_kind() == expected @pytest.mark.parametrize("""hand, expected""", _lowerCamelCase ) def a__ ( A_, A_ ): '''simple docstring''' assert PokerHand(_lowerCamelCase )._hand_type == expected @pytest.mark.parametrize("""hand, other, expected""", _lowerCamelCase ) def a__ ( A_, A_, A_ ): '''simple docstring''' assert PokerHand(_lowerCamelCase ).compare_with(PokerHand(_lowerCamelCase ) ) == expected @pytest.mark.parametrize("""hand, other, expected""", generate_random_hands() ) def a__ ( A_, A_, A_ ): '''simple docstring''' assert PokerHand(_lowerCamelCase ).compare_with(PokerHand(_lowerCamelCase ) ) == expected def a__ ( ): '''simple docstring''' __magic_name__ = [PokerHand(_lowerCamelCase ) for hand in SORTED_HANDS] __magic_name__ = poker_hands.copy() shuffle(_lowerCamelCase ) __magic_name__ = chain(sorted(_lowerCamelCase ) ) for index, hand in enumerate(_lowerCamelCase ): assert hand == poker_hands[index] def a__ ( ): '''simple docstring''' __magic_name__ = [PokerHand("""2D AC 3H 4H 5S""" ), PokerHand("""2S 3H 4H 5S 6C""" )] pokerhands.sort(reverse=_lowerCamelCase ) assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C" def a__ ( ): '''simple docstring''' __magic_name__ = PokerHand("""2C 4S AS 3D 5C""" ) __magic_name__ = True __magic_name__ = [5, 4, 3, 2, 14] for _ in range(10 ): assert pokerhand._is_five_high_straight() == expected assert pokerhand._card_values == expected_card_values def a__ ( ): '''simple docstring''' __magic_name__ = 0 __magic_name__ = os.path.abspath(os.path.dirname(_lowerCamelCase ) ) __magic_name__ = os.path.join(_lowerCamelCase, """poker_hands.txt""" ) with open(_lowerCamelCase ) as file_hand: for line in file_hand: __magic_name__ = line[:14].strip() __magic_name__ = line[15:].strip() __magic_name__ = PokerHand(_lowerCamelCase ), PokerHand(_lowerCamelCase ) __magic_name__ = player.compare_with(_lowerCamelCase ) if output == "Win": answer += 1 assert answer == 376
719
from collections import deque from .hash_table import HashTable class UpperCAmelCase_ ( _A ): '''simple docstring''' def __init__( self : int , *UpperCamelCase__ : Union[str, Any] , **UpperCamelCase__ : Optional[Any] ) -> Optional[Any]: """simple docstring""" super().__init__(*UpperCamelCase__ , **UpperCamelCase__ ) def _lowercase ( self : Optional[Any] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Any ) -> Dict: """simple docstring""" __magic_name__ = deque([] ) if self.values[key] is None else self.values[key] self.values[key].appendleft(UpperCamelCase__ ) __magic_name__ = self.values[key] def _lowercase ( self : List[str] ) -> int: """simple docstring""" return ( sum(self.charge_factor - len(UpperCamelCase__ ) for slot in self.values ) / self.size_table * self.charge_factor ) def _lowercase ( self : Optional[Any] , UpperCamelCase__ : str , UpperCamelCase__ : Tuple=None ) -> str: """simple docstring""" if not ( len(self.values[key] ) == self.charge_factor and self.values.count(UpperCamelCase__ ) == 0 ): return key return super()._collision_resolution(UpperCamelCase__ , UpperCamelCase__ )
76
0
import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import CLIPTokenizer, CLIPTokenizerFast from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import CLIPSegProcessor, ViTImageProcessor @require_vision class UpperCAmelCase_ ( unittest.TestCase ): '''simple docstring''' def _lowercase ( self : Any ) -> Any: """simple docstring""" __magic_name__ = tempfile.mkdtemp() # fmt: off __magic_name__ = ["""l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """lo""", """l</w>""", """w</w>""", """r</w>""", """t</w>""", """low</w>""", """er</w>""", """lowest</w>""", """newer</w>""", """wider""", """<unk>""", """<|startoftext|>""", """<|endoftext|>"""] # fmt: on __magic_name__ = dict(zip(A__ , range(len(A__ ) ) ) ) __magic_name__ = ["""#version: 0.2""", """l o""", """lo w</w>""", """e r</w>""", """"""] __magic_name__ = {"""unk_token""": """<unk>"""} __magic_name__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] ) __magic_name__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] ) with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp: fp.write(json.dumps(A__ ) + """\n""" ) with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp: fp.write("""\n""".join(A__ ) ) __magic_name__ = { """do_resize""": True, """size""": 20, """do_center_crop""": True, """crop_size""": 18, """do_normalize""": True, """image_mean""": [0.48145466, 0.4578275, 0.40821073], """image_std""": [0.26862954, 0.26130258, 0.27577711], } __magic_name__ = os.path.join(self.tmpdirname , A__ ) with open(self.image_processor_file , """w""" , encoding="""utf-8""" ) as fp: json.dump(A__ , A__ ) def _lowercase ( self : List[str] , **UpperCamelCase__ : str ) -> str: """simple docstring""" return CLIPTokenizer.from_pretrained(self.tmpdirname , **A__ ) def _lowercase ( self : str , **UpperCamelCase__ : Optional[int] ) -> Dict: """simple docstring""" return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **A__ ) def _lowercase ( self : Optional[int] , **UpperCamelCase__ : Union[str, Any] ) -> Optional[Any]: """simple docstring""" return ViTImageProcessor.from_pretrained(self.tmpdirname , **A__ ) def _lowercase ( self : Optional[Any] ) -> int: """simple docstring""" shutil.rmtree(self.tmpdirname ) def _lowercase ( self : List[Any] ) -> Any: """simple docstring""" __magic_name__ = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )] __magic_name__ = [Image.fromarray(np.moveaxis(A__ , 0 , -1 ) ) for x in image_inputs] return image_inputs def _lowercase ( self : Tuple ) -> Tuple: """simple docstring""" __magic_name__ = self.get_tokenizer() __magic_name__ = self.get_rust_tokenizer() __magic_name__ = self.get_image_processor() __magic_name__ = CLIPSegProcessor(tokenizer=A__ , image_processor=A__ ) processor_slow.save_pretrained(self.tmpdirname ) __magic_name__ = CLIPSegProcessor.from_pretrained(self.tmpdirname , use_fast=A__ ) __magic_name__ = CLIPSegProcessor(tokenizer=A__ , image_processor=A__ ) processor_fast.save_pretrained(self.tmpdirname ) __magic_name__ = CLIPSegProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() ) self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() ) self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() ) self.assertIsInstance(processor_slow.tokenizer , A__ ) self.assertIsInstance(processor_fast.tokenizer , A__ ) self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor_slow.image_processor , A__ ) self.assertIsInstance(processor_fast.image_processor , A__ ) def _lowercase ( self : Dict ) -> Tuple: """simple docstring""" __magic_name__ = CLIPSegProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) __magic_name__ = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" ) __magic_name__ = self.get_image_processor(do_normalize=A__ , padding_value=1.0 ) __magic_name__ = CLIPSegProcessor.from_pretrained( self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=A__ , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , A__ ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , A__ ) def _lowercase ( self : Tuple ) -> int: """simple docstring""" __magic_name__ = self.get_image_processor() __magic_name__ = self.get_tokenizer() __magic_name__ = CLIPSegProcessor(tokenizer=A__ , image_processor=A__ ) __magic_name__ = self.prepare_image_inputs() __magic_name__ = image_processor(A__ , return_tensors="""np""" ) __magic_name__ = processor(images=A__ , return_tensors="""np""" ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 ) def _lowercase ( self : List[Any] ) -> Union[str, Any]: """simple docstring""" __magic_name__ = self.get_image_processor() __magic_name__ = self.get_tokenizer() __magic_name__ = CLIPSegProcessor(tokenizer=A__ , image_processor=A__ ) __magic_name__ = """lower newer""" __magic_name__ = processor(text=A__ ) __magic_name__ = tokenizer(A__ ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def _lowercase ( self : Optional[int] ) -> List[str]: """simple docstring""" __magic_name__ = self.get_image_processor() __magic_name__ = self.get_tokenizer() __magic_name__ = CLIPSegProcessor(tokenizer=A__ , image_processor=A__ ) __magic_name__ = """lower newer""" __magic_name__ = self.prepare_image_inputs() __magic_name__ = processor(text=A__ , images=A__ ) self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """attention_mask""", """pixel_values"""] ) # test if it raises when no input is passed with pytest.raises(A__ ): processor() def _lowercase ( self : Any ) -> Dict: """simple docstring""" __magic_name__ = self.get_image_processor() __magic_name__ = self.get_tokenizer() __magic_name__ = CLIPSegProcessor(tokenizer=A__ , image_processor=A__ ) __magic_name__ = self.prepare_image_inputs() __magic_name__ = self.prepare_image_inputs() __magic_name__ = processor(images=A__ , visual_prompt=A__ ) self.assertListEqual(list(inputs.keys() ) , ["""pixel_values""", """conditional_pixel_values"""] ) # test if it raises when no input is passed with pytest.raises(A__ ): processor() def _lowercase ( self : Dict ) -> Union[str, Any]: """simple docstring""" __magic_name__ = self.get_image_processor() __magic_name__ = self.get_tokenizer() __magic_name__ = CLIPSegProcessor(tokenizer=A__ , image_processor=A__ ) __magic_name__ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] __magic_name__ = processor.batch_decode(A__ ) __magic_name__ = tokenizer.batch_decode(A__ ) self.assertListEqual(A__ , A__ )
720
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import os from ...utils.constants import SAGEMAKER_PARALLEL_EC2_INSTANCES, TORCH_DYNAMO_MODES from ...utils.dataclasses import ComputeEnvironment, SageMakerDistributedType from ...utils.imports import is_botoa_available from .config_args import SageMakerConfig from .config_utils import ( DYNAMO_BACKENDS, _ask_field, _ask_options, _convert_dynamo_backend, _convert_mixed_precision, _convert_sagemaker_distributed_mode, _convert_yes_no_to_bool, ) if is_botoa_available(): import botoa # noqa: F401 def a__ ( A_ ): '''simple docstring''' __magic_name__ = botoa.client("""iam""" ) __magic_name__ = { """Version""": """2012-10-17""", """Statement""": [ {"""Effect""": """Allow""", """Principal""": {"""Service""": """sagemaker.amazonaws.com"""}, """Action""": """sts:AssumeRole"""} ], } try: # create the role, associated with the chosen trust policy iam_client.create_role( RoleName=A_, AssumeRolePolicyDocument=json.dumps(A_, indent=2 ) ) __magic_name__ = { """Version""": """2012-10-17""", """Statement""": [ { """Effect""": """Allow""", """Action""": [ """sagemaker:*""", """ecr:GetDownloadUrlForLayer""", """ecr:BatchGetImage""", """ecr:BatchCheckLayerAvailability""", """ecr:GetAuthorizationToken""", """cloudwatch:PutMetricData""", """cloudwatch:GetMetricData""", """cloudwatch:GetMetricStatistics""", """cloudwatch:ListMetrics""", """logs:CreateLogGroup""", """logs:CreateLogStream""", """logs:DescribeLogStreams""", """logs:PutLogEvents""", """logs:GetLogEvents""", """s3:CreateBucket""", """s3:ListBucket""", """s3:GetBucketLocation""", """s3:GetObject""", """s3:PutObject""", ], """Resource""": """*""", } ], } # attach policy to role iam_client.put_role_policy( RoleName=A_, PolicyName=f'''{role_name}_policy_permission''', PolicyDocument=json.dumps(A_, indent=2 ), ) except iam_client.exceptions.EntityAlreadyExistsException: print(f'''role {role_name} already exists. Using existing one''' ) def a__ ( A_ ): '''simple docstring''' __magic_name__ = botoa.client("""iam""" ) return iam_client.get_role(RoleName=A_ )["Role"]["Arn"] def a__ ( ): '''simple docstring''' __magic_name__ = _ask_options( """How do you want to authorize?""", ["""AWS Profile""", """Credentials (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY) """], A_, ) __magic_name__ = None if credentials_configuration == 0: __magic_name__ = _ask_field("""Enter your AWS Profile name: [default] """, default="""default""" ) __magic_name__ = aws_profile else: print( """Note you will need to provide AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY when you launch you training script with,""" """`accelerate launch --aws_access_key_id XXX --aws_secret_access_key YYY`""" ) __magic_name__ = _ask_field("""AWS Access Key ID: """ ) __magic_name__ = aws_access_key_id __magic_name__ = _ask_field("""AWS Secret Access Key: """ ) __magic_name__ = aws_secret_access_key __magic_name__ = _ask_field("""Enter your AWS Region: [us-east-1]""", default="""us-east-1""" ) __magic_name__ = aws_region __magic_name__ = _ask_options( """Do you already have an IAM Role for executing Amazon SageMaker Training Jobs?""", ["""Provide IAM Role name""", """Create new IAM role using credentials"""], A_, ) if role_management == 0: __magic_name__ = _ask_field("""Enter your IAM role name: """ ) else: __magic_name__ = """accelerate_sagemaker_execution_role""" print(f'''Accelerate will create an iam role "{iam_role_name}" using the provided credentials''' ) _create_iam_role_for_sagemaker(A_ ) __magic_name__ = _ask_field( """Do you want to use custom Docker image? [yes/NO]: """, _convert_yes_no_to_bool, default=A_, error_message="""Please enter yes or no.""", ) __magic_name__ = None if is_custom_docker_image: __magic_name__ = _ask_field("""Enter your Docker image: """, lambda A_ : str(A_ ).lower() ) __magic_name__ = _ask_field( """Do you want to provide SageMaker input channels with data locations? [yes/NO]: """, _convert_yes_no_to_bool, default=A_, error_message="""Please enter yes or no.""", ) __magic_name__ = None if is_sagemaker_inputs_enabled: __magic_name__ = _ask_field( """Enter the path to the SageMaker inputs TSV file with columns (channel_name, data_location): """, lambda A_ : str(A_ ).lower(), ) __magic_name__ = _ask_field( """Do you want to enable SageMaker metrics? [yes/NO]: """, _convert_yes_no_to_bool, default=A_, error_message="""Please enter yes or no.""", ) __magic_name__ = None if is_sagemaker_metrics_enabled: __magic_name__ = _ask_field( """Enter the path to the SageMaker metrics TSV file with columns (metric_name, metric_regex): """, lambda A_ : str(A_ ).lower(), ) __magic_name__ = _ask_options( """What is the distributed mode?""", ["""No distributed training""", """Data parallelism"""], _convert_sagemaker_distributed_mode, ) __magic_name__ = {} __magic_name__ = _ask_field( """Do you wish to optimize your script with torch dynamo?[yes/NO]:""", _convert_yes_no_to_bool, default=A_, error_message="""Please enter yes or no.""", ) if use_dynamo: __magic_name__ = """dynamo_""" __magic_name__ = _ask_options( """Which dynamo backend would you like to use?""", [x.lower() for x in DYNAMO_BACKENDS], _convert_dynamo_backend, default=2, ) __magic_name__ = _ask_field( """Do you want to customize the defaults sent to torch.compile? [yes/NO]: """, _convert_yes_no_to_bool, default=A_, error_message="""Please enter yes or no.""", ) if use_custom_options: __magic_name__ = _ask_options( """Which mode do you want to use?""", A_, lambda A_ : TORCH_DYNAMO_MODES[int(A_ )], default="""default""", ) __magic_name__ = _ask_field( """Do you want the fullgraph mode or it is ok to break model into several subgraphs? [yes/NO]: """, _convert_yes_no_to_bool, default=A_, error_message="""Please enter yes or no.""", ) __magic_name__ = _ask_field( """Do you want to enable dynamic shape tracing? [yes/NO]: """, _convert_yes_no_to_bool, default=A_, error_message="""Please enter yes or no.""", ) __magic_name__ = """Which EC2 instance type you want to use for your training?""" if distributed_type != SageMakerDistributedType.NO: __magic_name__ = _ask_options( A_, A_, lambda A_ : SAGEMAKER_PARALLEL_EC2_INSTANCES[int(A_ )] ) else: eca_instance_query += "? [ml.p3.2xlarge]:" __magic_name__ = _ask_field(A_, lambda A_ : str(A_ ).lower(), default="""ml.p3.2xlarge""" ) __magic_name__ = 1 if distributed_type in (SageMakerDistributedType.DATA_PARALLEL, SageMakerDistributedType.MODEL_PARALLEL): __magic_name__ = _ask_field( """How many machines do you want use? [1]: """, A_, default=1, ) __magic_name__ = _ask_options( """Do you wish to use FP16 or BF16 (mixed precision)?""", ["""no""", """fp16""", """bf16""", """fp8"""], _convert_mixed_precision, ) if use_dynamo and mixed_precision == "no": print( """Torch dynamo used without mixed precision requires TF32 to be efficient. Accelerate will enable it by default when launching your scripts.""" ) return SageMakerConfig( image_uri=A_, compute_environment=ComputeEnvironment.AMAZON_SAGEMAKER, distributed_type=A_, use_cpu=A_, dynamo_config=A_, eca_instance_type=A_, profile=A_, region=A_, iam_role_name=A_, mixed_precision=A_, num_machines=A_, sagemaker_inputs_file=A_, sagemaker_metrics_file=A_, )
76
0
from __future__ import annotations from collections.abc import Callable def a__ ( A_, A_, A_, A_ = 100, ): '''simple docstring''' __magic_name__ = x_start __magic_name__ = fnc(lowerCAmelCase__ ) __magic_name__ = 0.0 for _ in range(lowerCAmelCase__ ): # Approximates small segments of curve as linear and solve # for trapezoidal area __magic_name__ = (x_end - x_start) / steps + xa __magic_name__ = fnc(lowerCAmelCase__ ) area += abs(fxa + fxa ) * (xa - xa) / 2 # Increment step __magic_name__ = xa __magic_name__ = fxa return area if __name__ == "__main__": def a__ ( A_ ): '''simple docstring''' return x**3 + x**2 print('f(x) = x^3 + x^2') print('The area between the curve, x = -5, x = 5 and the x axis is:') __lowerCAmelCase : Optional[int] = 10 while i <= 100000: print(F'''with {i} steps: {trapezoidal_area(f, -5, 5, i)}''') i *= 10
721
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, convert_to_rgb, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging __lowerCAmelCase : Dict = logging.get_logger(__name__) if is_vision_available(): import PIL class UpperCAmelCase_ ( _A ): '''simple docstring''' a__ = ["""pixel_values"""] def __init__( self : Optional[Any] , UpperCamelCase__ : bool = True , UpperCamelCase__ : Dict[str, int] = None , UpperCamelCase__ : PILImageResampling = PILImageResampling.BICUBIC , UpperCamelCase__ : bool = True , UpperCamelCase__ : Dict[str, int] = None , UpperCamelCase__ : bool = True , UpperCamelCase__ : Union[int, float] = 1 / 255 , UpperCamelCase__ : bool = True , UpperCamelCase__ : Optional[Union[float, List[float]]] = None , UpperCamelCase__ : Optional[Union[float, List[float]]] = None , UpperCamelCase__ : bool = True , **UpperCamelCase__ : int , ) -> None: """simple docstring""" super().__init__(**UpperCamelCase__ ) __magic_name__ = size if size is not None else {"""shortest_edge""": 224} __magic_name__ = get_size_dict(UpperCamelCase__ , default_to_square=UpperCamelCase__ ) __magic_name__ = crop_size if crop_size is not None else {"""height""": 224, """width""": 224} __magic_name__ = get_size_dict(UpperCamelCase__ , default_to_square=UpperCamelCase__ , param_name="""crop_size""" ) __magic_name__ = do_resize __magic_name__ = size __magic_name__ = resample __magic_name__ = do_center_crop __magic_name__ = crop_size __magic_name__ = do_rescale __magic_name__ = rescale_factor __magic_name__ = do_normalize __magic_name__ = image_mean if image_mean is not None else OPENAI_CLIP_MEAN __magic_name__ = image_std if image_std is not None else OPENAI_CLIP_STD __magic_name__ = do_convert_rgb def _lowercase ( self : Union[str, Any] , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : Dict[str, int] , UpperCamelCase__ : PILImageResampling = PILImageResampling.BICUBIC , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : Tuple , ) -> np.ndarray: """simple docstring""" __magic_name__ = get_size_dict(UpperCamelCase__ , default_to_square=UpperCamelCase__ ) if "shortest_edge" not in size: raise ValueError(F'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''' ) __magic_name__ = get_resize_output_image_size(UpperCamelCase__ , size=size["""shortest_edge"""] , default_to_square=UpperCamelCase__ ) return resize(UpperCamelCase__ , size=UpperCamelCase__ , resample=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__ ) def _lowercase ( self : Tuple , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : Dict[str, int] , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : Tuple , ) -> np.ndarray: """simple docstring""" __magic_name__ = get_size_dict(UpperCamelCase__ ) if "height" not in size or "width" not in size: raise ValueError(F'''The `size` parameter must contain the keys (height, width). Got {size.keys()}''' ) return center_crop(UpperCamelCase__ , size=(size["""height"""], size["""width"""]) , data_format=UpperCamelCase__ , **UpperCamelCase__ ) def _lowercase ( self : Any , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : Union[int, float] , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : Optional[Any] , ) -> Optional[int]: """simple docstring""" return rescale(UpperCamelCase__ , scale=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__ ) def _lowercase ( self : Union[str, Any] , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : Union[float, List[float]] , UpperCamelCase__ : Union[float, List[float]] , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : Dict , ) -> np.ndarray: """simple docstring""" return normalize(UpperCamelCase__ , mean=UpperCamelCase__ , std=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__ ) def _lowercase ( self : List[Any] , UpperCamelCase__ : ImageInput , UpperCamelCase__ : bool = None , UpperCamelCase__ : Dict[str, int] = None , UpperCamelCase__ : PILImageResampling = None , UpperCamelCase__ : bool = None , UpperCamelCase__ : int = None , UpperCamelCase__ : bool = None , UpperCamelCase__ : float = None , UpperCamelCase__ : bool = None , UpperCamelCase__ : Optional[Union[float, List[float]]] = None , UpperCamelCase__ : Optional[Union[float, List[float]]] = None , UpperCamelCase__ : bool = None , UpperCamelCase__ : Optional[Union[str, TensorType]] = None , UpperCamelCase__ : Optional[ChannelDimension] = ChannelDimension.FIRST , **UpperCamelCase__ : Dict , ) -> PIL.Image.Image: """simple docstring""" __magic_name__ = do_resize if do_resize is not None else self.do_resize __magic_name__ = size if size is not None else self.size __magic_name__ = get_size_dict(UpperCamelCase__ , param_name="""size""" , default_to_square=UpperCamelCase__ ) __magic_name__ = resample if resample is not None else self.resample __magic_name__ = do_center_crop if do_center_crop is not None else self.do_center_crop __magic_name__ = crop_size if crop_size is not None else self.crop_size __magic_name__ = get_size_dict(UpperCamelCase__ , param_name="""crop_size""" , default_to_square=UpperCamelCase__ ) __magic_name__ = do_rescale if do_rescale is not None else self.do_rescale __magic_name__ = rescale_factor if rescale_factor is not None else self.rescale_factor __magic_name__ = do_normalize if do_normalize is not None else self.do_normalize __magic_name__ = image_mean if image_mean is not None else self.image_mean __magic_name__ = image_std if image_std is not None else self.image_std __magic_name__ = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb __magic_name__ = make_list_of_images(UpperCamelCase__ ) if not valid_images(UpperCamelCase__ ): raise ValueError( """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """ """torch.Tensor, tf.Tensor or jax.ndarray.""" ) if do_resize and size is None: raise ValueError("""Size must be specified if do_resize is True.""" ) if do_center_crop and crop_size is None: raise ValueError("""Crop size must be specified if do_center_crop is True.""" ) if do_rescale and rescale_factor is None: raise ValueError("""Rescale factor must be specified if do_rescale is True.""" ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("""Image mean and std must be specified if do_normalize is True.""" ) # PIL RGBA images are converted to RGB if do_convert_rgb: __magic_name__ = [convert_to_rgb(UpperCamelCase__ ) for image in images] # All transformations expect numpy arrays. __magic_name__ = [to_numpy_array(UpperCamelCase__ ) for image in images] if do_resize: __magic_name__ = [self.resize(image=UpperCamelCase__ , size=UpperCamelCase__ , resample=UpperCamelCase__ ) for image in images] if do_center_crop: __magic_name__ = [self.center_crop(image=UpperCamelCase__ , size=UpperCamelCase__ ) for image in images] if do_rescale: __magic_name__ = [self.rescale(image=UpperCamelCase__ , scale=UpperCamelCase__ ) for image in images] if do_normalize: __magic_name__ = [self.normalize(image=UpperCamelCase__ , mean=UpperCamelCase__ , std=UpperCamelCase__ ) for image in images] __magic_name__ = [to_channel_dimension_format(UpperCamelCase__ , UpperCamelCase__ ) for image in images] __magic_name__ = {"""pixel_values""": images} return BatchFeature(data=UpperCamelCase__ , tensor_type=UpperCamelCase__ )
76
0
from typing import List import datasets from datasets.tasks import AudioClassification from ..folder_based_builder import folder_based_builder __lowerCAmelCase : Tuple = datasets.utils.logging.get_logger(__name__) class UpperCAmelCase_ ( folder_based_builder.FolderBasedBuilderConfig ): '''simple docstring''' a__ = None a__ = None class UpperCAmelCase_ ( folder_based_builder.FolderBasedBuilder ): '''simple docstring''' a__ = datasets.Audio() a__ = "audio" a__ = AudioFolderConfig a__ = 42 # definition at the bottom of the script a__ = AudioClassification(audio_column="""audio""" , label_column="""label""" ) __lowerCAmelCase : Optional[Any] = [ """.aiff""", """.au""", """.avr""", """.caf""", """.flac""", """.htk""", """.svx""", """.mat4""", """.mat5""", """.mpc2k""", """.ogg""", """.paf""", """.pvf""", """.raw""", """.rf64""", """.sd2""", """.sds""", """.ircam""", """.voc""", """.w64""", """.wav""", """.nist""", """.wavex""", """.wve""", """.xi""", """.mp3""", """.opus""", ] __lowerCAmelCase : Tuple = AUDIO_EXTENSIONS
700
import unittest from transformers import AutoTokenizer, NystromformerConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( NystromformerForMaskedLM, NystromformerForMultipleChoice, NystromformerForQuestionAnswering, NystromformerForSequenceClassification, NystromformerForTokenClassification, NystromformerModel, ) from transformers.models.nystromformer.modeling_nystromformer import NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST class UpperCAmelCase_ : '''simple docstring''' def __init__( self : Any , UpperCamelCase__ : Any , UpperCamelCase__ : Tuple=13 , UpperCamelCase__ : Dict=7 , UpperCamelCase__ : Dict=True , UpperCamelCase__ : List[str]=True , UpperCamelCase__ : Optional[int]=True , UpperCamelCase__ : int=True , UpperCamelCase__ : Optional[int]=99 , UpperCamelCase__ : List[Any]=32 , UpperCamelCase__ : Any=5 , UpperCamelCase__ : List[Any]=4 , UpperCamelCase__ : str=37 , UpperCamelCase__ : Any="gelu" , UpperCamelCase__ : int=0.1 , UpperCamelCase__ : Tuple=0.1 , UpperCamelCase__ : Dict=512 , UpperCamelCase__ : Union[str, Any]=16 , UpperCamelCase__ : Dict=2 , UpperCamelCase__ : Any=0.02 , UpperCamelCase__ : Any=3 , UpperCamelCase__ : Dict=4 , UpperCamelCase__ : List[Any]=None , ) -> Union[str, Any]: """simple docstring""" __magic_name__ = parent __magic_name__ = batch_size __magic_name__ = seq_length __magic_name__ = is_training __magic_name__ = use_input_mask __magic_name__ = use_token_type_ids __magic_name__ = use_labels __magic_name__ = vocab_size __magic_name__ = hidden_size __magic_name__ = num_hidden_layers __magic_name__ = num_attention_heads __magic_name__ = intermediate_size __magic_name__ = hidden_act __magic_name__ = hidden_dropout_prob __magic_name__ = attention_probs_dropout_prob __magic_name__ = max_position_embeddings __magic_name__ = type_vocab_size __magic_name__ = type_sequence_label_size __magic_name__ = initializer_range __magic_name__ = num_labels __magic_name__ = num_choices __magic_name__ = scope def _lowercase ( self : Any ) -> Any: """simple docstring""" __magic_name__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __magic_name__ = None if self.use_input_mask: __magic_name__ = random_attention_mask([self.batch_size, self.seq_length] ) __magic_name__ = None if self.use_token_type_ids: __magic_name__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __magic_name__ = None __magic_name__ = None __magic_name__ = None if self.use_labels: __magic_name__ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __magic_name__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __magic_name__ = ids_tensor([self.batch_size] , self.num_choices ) __magic_name__ = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def _lowercase ( self : Tuple ) -> Any: """simple docstring""" return NystromformerConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCamelCase__ , initializer_range=self.initializer_range , ) def _lowercase ( self : Any , UpperCamelCase__ : Tuple , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : str , UpperCamelCase__ : int , UpperCamelCase__ : str , UpperCamelCase__ : Dict , UpperCamelCase__ : str ) -> Tuple: """simple docstring""" __magic_name__ = NystromformerModel(config=UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() __magic_name__ = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ ) __magic_name__ = model(UpperCamelCase__ , token_type_ids=UpperCamelCase__ ) __magic_name__ = model(UpperCamelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _lowercase ( self : Optional[int] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : str , UpperCamelCase__ : List[str] , UpperCamelCase__ : List[str] ) -> str: """simple docstring""" __magic_name__ = NystromformerForMaskedLM(config=UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() __magic_name__ = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , labels=UpperCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def _lowercase ( self : Tuple , UpperCamelCase__ : Any , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Dict , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Any , UpperCamelCase__ : Any ) -> Optional[Any]: """simple docstring""" __magic_name__ = NystromformerForQuestionAnswering(config=UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() __magic_name__ = model( UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , start_positions=UpperCamelCase__ , end_positions=UpperCamelCase__ , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def _lowercase ( self : Optional[Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : List[str] , UpperCamelCase__ : List[str] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Dict , UpperCamelCase__ : Dict , UpperCamelCase__ : Any ) -> Optional[int]: """simple docstring""" __magic_name__ = self.num_labels __magic_name__ = NystromformerForSequenceClassification(UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() __magic_name__ = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , labels=UpperCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def _lowercase ( self : Any , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : str , UpperCamelCase__ : Any ) -> Dict: """simple docstring""" __magic_name__ = self.num_labels __magic_name__ = NystromformerForTokenClassification(config=UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() __magic_name__ = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , labels=UpperCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def _lowercase ( self : Union[str, Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Tuple , UpperCamelCase__ : int , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : int , UpperCamelCase__ : int ) -> Optional[Any]: """simple docstring""" __magic_name__ = self.num_choices __magic_name__ = NystromformerForMultipleChoice(config=UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() __magic_name__ = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __magic_name__ = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __magic_name__ = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __magic_name__ = model( UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , labels=UpperCamelCase__ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def _lowercase ( self : int ) -> List[Any]: """simple docstring""" __magic_name__ = self.prepare_config_and_inputs() ( ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ) = config_and_inputs __magic_name__ = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask} return config, inputs_dict @require_torch class UpperCAmelCase_ ( _A , _A , unittest.TestCase ): '''simple docstring''' a__ = ( ( NystromformerModel, NystromformerForMaskedLM, NystromformerForMultipleChoice, NystromformerForQuestionAnswering, NystromformerForSequenceClassification, NystromformerForTokenClassification, ) if is_torch_available() else () ) a__ = ( { """feature-extraction""": NystromformerModel, """fill-mask""": NystromformerForMaskedLM, """question-answering""": NystromformerForQuestionAnswering, """text-classification""": NystromformerForSequenceClassification, """token-classification""": NystromformerForTokenClassification, """zero-shot""": NystromformerForSequenceClassification, } if is_torch_available() else {} ) a__ = False a__ = False def _lowercase ( self : Union[str, Any] ) -> Union[str, Any]: """simple docstring""" __magic_name__ = NystromformerModelTester(self ) __magic_name__ = ConfigTester(self , config_class=UpperCamelCase__ , hidden_size=37 ) def _lowercase ( self : Tuple ) -> Any: """simple docstring""" self.config_tester.run_common_tests() def _lowercase ( self : Optional[Any] ) -> Any: """simple docstring""" __magic_name__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCamelCase__ ) def _lowercase ( self : Optional[Any] ) -> int: """simple docstring""" __magic_name__ = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: __magic_name__ = type self.model_tester.create_and_check_model(*UpperCamelCase__ ) def _lowercase ( self : List[Any] ) -> List[Any]: """simple docstring""" __magic_name__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*UpperCamelCase__ ) def _lowercase ( self : Union[str, Any] ) -> str: """simple docstring""" __magic_name__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*UpperCamelCase__ ) def _lowercase ( self : Dict ) -> List[Any]: """simple docstring""" __magic_name__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*UpperCamelCase__ ) def _lowercase ( self : str ) -> int: """simple docstring""" __magic_name__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*UpperCamelCase__ ) def _lowercase ( self : List[Any] ) -> List[str]: """simple docstring""" __magic_name__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*UpperCamelCase__ ) @slow def _lowercase ( self : str ) -> Tuple: """simple docstring""" for model_name in NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __magic_name__ = NystromformerModel.from_pretrained(UpperCamelCase__ ) self.assertIsNotNone(UpperCamelCase__ ) @require_torch class UpperCAmelCase_ ( unittest.TestCase ): '''simple docstring''' @slow def _lowercase ( self : Union[str, Any] ) -> Optional[Any]: """simple docstring""" __magic_name__ = NystromformerModel.from_pretrained("""uw-madison/nystromformer-512""" ) __magic_name__ = torch.tensor([[0, 1, 2, 3, 4, 5]] ) with torch.no_grad(): __magic_name__ = model(UpperCamelCase__ )[0] __magic_name__ = torch.Size((1, 6, 768) ) self.assertEqual(output.shape , UpperCamelCase__ ) __magic_name__ = torch.tensor( [[[-0.4532, -0.0936, 0.5137], [-0.2676, 0.0628, 0.6186], [-0.3629, -0.1726, 0.4716]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , UpperCamelCase__ , atol=1E-4 ) ) @slow def _lowercase ( self : int ) -> str: """simple docstring""" __magic_name__ = """the [MASK] of Belgium is Brussels""" __magic_name__ = AutoTokenizer.from_pretrained("""uw-madison/nystromformer-512""" ) __magic_name__ = NystromformerForMaskedLM.from_pretrained("""uw-madison/nystromformer-512""" ) __magic_name__ = tokenizer(UpperCamelCase__ , return_tensors="""pt""" ) with torch.no_grad(): __magic_name__ = model(encoding.input_ids ).logits __magic_name__ = token_logits[:, 2, :].argmax(-1 )[0] self.assertEqual(tokenizer.decode(UpperCamelCase__ ) , """capital""" )
76
0
def a__ ( A_ ): '''simple docstring''' if divisor % 5 == 0 or divisor % 2 == 0: return 0 __magic_name__ = 1 __magic_name__ = 1 while repunit: __magic_name__ = (10 * repunit + 1) % divisor repunit_index += 1 return repunit_index def a__ ( A_ = 1000000 ): '''simple docstring''' __magic_name__ = limit - 1 if divisor % 2 == 0: divisor += 1 while least_divisible_repunit(A_ ) <= limit: divisor += 2 return divisor if __name__ == "__main__": print(F'''{solution() = }''')
701
from ...configuration_utils import PretrainedConfig from ...utils import logging __lowerCAmelCase : Tuple = logging.get_logger(__name__) __lowerCAmelCase : Union[str, Any] = { 'microsoft/cvt-13': 'https://huggingface.co/microsoft/cvt-13/resolve/main/config.json', # See all Cvt models at https://huggingface.co/models?filter=cvt } class UpperCAmelCase_ ( _A ): '''simple docstring''' a__ = """cvt""" def __init__( self : Dict , UpperCamelCase__ : Optional[int]=3 , UpperCamelCase__ : List[Any]=[7, 3, 3] , UpperCamelCase__ : Any=[4, 2, 2] , UpperCamelCase__ : Optional[Any]=[2, 1, 1] , UpperCamelCase__ : Union[str, Any]=[64, 192, 384] , UpperCamelCase__ : Dict=[1, 3, 6] , UpperCamelCase__ : Any=[1, 2, 10] , UpperCamelCase__ : List[str]=[4.0, 4.0, 4.0] , UpperCamelCase__ : Dict=[0.0, 0.0, 0.0] , UpperCamelCase__ : Tuple=[0.0, 0.0, 0.0] , UpperCamelCase__ : Optional[Any]=[0.0, 0.0, 0.1] , UpperCamelCase__ : str=[True, True, True] , UpperCamelCase__ : Optional[Any]=[False, False, True] , UpperCamelCase__ : Union[str, Any]=["dw_bn", "dw_bn", "dw_bn"] , UpperCamelCase__ : List[Any]=[3, 3, 3] , UpperCamelCase__ : Any=[1, 1, 1] , UpperCamelCase__ : Optional[int]=[2, 2, 2] , UpperCamelCase__ : Any=[1, 1, 1] , UpperCamelCase__ : List[str]=[1, 1, 1] , UpperCamelCase__ : int=0.02 , UpperCamelCase__ : int=1E-12 , **UpperCamelCase__ : int , ) -> Dict: """simple docstring""" super().__init__(**UpperCamelCase__ ) __magic_name__ = num_channels __magic_name__ = patch_sizes __magic_name__ = patch_stride __magic_name__ = patch_padding __magic_name__ = embed_dim __magic_name__ = num_heads __magic_name__ = depth __magic_name__ = mlp_ratio __magic_name__ = attention_drop_rate __magic_name__ = drop_rate __magic_name__ = drop_path_rate __magic_name__ = qkv_bias __magic_name__ = cls_token __magic_name__ = qkv_projection_method __magic_name__ = kernel_qkv __magic_name__ = padding_kv __magic_name__ = stride_kv __magic_name__ = padding_q __magic_name__ = stride_q __magic_name__ = initializer_range __magic_name__ = layer_norm_eps
76
0
import random from .binary_exp_mod import bin_exp_mod def a__ ( A_, A_=1000 ): '''simple docstring''' if n < 2: return False if n % 2 == 0: return n == 2 # this means n is odd __magic_name__ = n - 1 __magic_name__ = 0 while d % 2 == 0: d /= 2 exp += 1 # n - 1=d*(2**exp) __magic_name__ = 0 while count < prec: __magic_name__ = random.randint(2, n - 1 ) __magic_name__ = bin_exp_mod(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) if b != 1: __magic_name__ = True for _ in range(SCREAMING_SNAKE_CASE_ ): if b == n - 1: __magic_name__ = False break __magic_name__ = b * b b %= n if flag: return False count += 1 return True if __name__ == "__main__": __lowerCAmelCase : Union[str, Any] = abs(int(input('Enter bound : ').strip())) print('Here\'s the list of primes:') print(', '.join(str(i) for i in range(n + 1) if is_prime_big(i)))
702
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available __lowerCAmelCase : List[str] = { 'configuration_canine': ['CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'CanineConfig'], 'tokenization_canine': ['CanineTokenizer'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCAmelCase : Optional[Any] = [ 'CANINE_PRETRAINED_MODEL_ARCHIVE_LIST', 'CanineForMultipleChoice', 'CanineForQuestionAnswering', 'CanineForSequenceClassification', 'CanineForTokenClassification', 'CanineLayer', 'CanineModel', 'CaninePreTrainedModel', 'load_tf_weights_in_canine', ] if TYPE_CHECKING: from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig from .tokenization_canine import CanineTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_canine import ( CANINE_PRETRAINED_MODEL_ARCHIVE_LIST, CanineForMultipleChoice, CanineForQuestionAnswering, CanineForSequenceClassification, CanineForTokenClassification, CanineLayer, CanineModel, CaninePreTrainedModel, load_tf_weights_in_canine, ) else: import sys __lowerCAmelCase : int = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
76
0
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from pathlib import Path import torch from ...utils import is_npu_available, is_xpu_available from .config_args import ClusterConfig, default_json_config_file from .config_utils import SubcommandHelpFormatter __lowerCAmelCase : Any = 'Create a default config file for Accelerate with only a few flags set.' def a__ ( A_="no", A_ = default_json_config_file, A_ = False ): '''simple docstring''' __magic_name__ = Path(SCREAMING_SNAKE_CASE_ ) path.parent.mkdir(parents=SCREAMING_SNAKE_CASE_, exist_ok=SCREAMING_SNAKE_CASE_ ) if path.exists(): print( f'''Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`.''' ) return False __magic_name__ = mixed_precision.lower() if mixed_precision not in ["no", "fp16", "bf16", "fp8"]: raise ValueError( f'''`mixed_precision` should be one of \'no\', \'fp16\', \'bf16\', or \'fp8\'. Received {mixed_precision}''' ) __magic_name__ = { 'compute_environment': 'LOCAL_MACHINE', 'mixed_precision': mixed_precision, } if torch.cuda.is_available(): __magic_name__ = torch.cuda.device_count() __magic_name__ = num_gpus __magic_name__ = False if num_gpus > 1: __magic_name__ = 'MULTI_GPU' else: __magic_name__ = 'NO' elif is_xpu_available() and use_xpu: __magic_name__ = torch.xpu.device_count() __magic_name__ = num_xpus __magic_name__ = False if num_xpus > 1: __magic_name__ = 'MULTI_XPU' else: __magic_name__ = 'NO' elif is_npu_available(): __magic_name__ = torch.npu.device_count() __magic_name__ = num_npus __magic_name__ = False if num_npus > 1: __magic_name__ = 'MULTI_NPU' else: __magic_name__ = 'NO' else: __magic_name__ = 0 __magic_name__ = True __magic_name__ = 1 __magic_name__ = 'NO' __magic_name__ = ClusterConfig(**SCREAMING_SNAKE_CASE_ ) config.to_json_file(SCREAMING_SNAKE_CASE_ ) return path def a__ ( A_, A_ ): '''simple docstring''' __magic_name__ = parser.add_parser("""default""", parents=SCREAMING_SNAKE_CASE_, help=SCREAMING_SNAKE_CASE_, formatter_class=SCREAMING_SNAKE_CASE_ ) parser.add_argument( """--config_file""", default=SCREAMING_SNAKE_CASE_, help=( """The path to use to store the config file. Will default to a file named default_config.yaml in the cache """ """location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have """ """such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed """ """with \'huggingface\'.""" ), dest="""save_location""", ) parser.add_argument( """--mixed_precision""", choices=["""no""", """fp16""", """bf16"""], type=SCREAMING_SNAKE_CASE_, help="""Whether or not to use mixed precision training. """ """Choose between FP16 and BF16 (bfloat16) training. """ """BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.""", default="""no""", ) parser.set_defaults(func=SCREAMING_SNAKE_CASE_ ) return parser def a__ ( A_ ): '''simple docstring''' __magic_name__ = write_basic_config(args.mixed_precision, args.save_location ) if config_file: print(f'''accelerate configuration saved at {config_file}''' )
703
import argparse import torch from transformers import ( WavaVecaConfig, WavaVecaFeatureExtractor, WavaVecaForAudioFrameClassification, WavaVecaForSequenceClassification, WavaVecaForXVector, logging, ) logging.set_verbosity_info() __lowerCAmelCase : str = logging.get_logger(__name__) def a__ ( A_, A_, A_ ): '''simple docstring''' __magic_name__ = WavaVecaForSequenceClassification.from_pretrained(A_, config=A_ ) __magic_name__ = downstream_dict["""projector.weight"""] __magic_name__ = downstream_dict["""projector.bias"""] __magic_name__ = downstream_dict["""model.post_net.linear.weight"""] __magic_name__ = downstream_dict["""model.post_net.linear.bias"""] return model def a__ ( A_, A_, A_ ): '''simple docstring''' __magic_name__ = WavaVecaForAudioFrameClassification.from_pretrained(A_, config=A_ ) __magic_name__ = downstream_dict["""model.linear.weight"""] __magic_name__ = downstream_dict["""model.linear.bias"""] return model def a__ ( A_, A_, A_ ): '''simple docstring''' __magic_name__ = WavaVecaForXVector.from_pretrained(A_, config=A_ ) __magic_name__ = downstream_dict["""connector.weight"""] __magic_name__ = downstream_dict["""connector.bias"""] for i, kernel_size in enumerate(hf_config.tdnn_kernel ): __magic_name__ = downstream_dict[ f'''model.framelevel_feature_extractor.module.{i}.kernel.weight''' ] __magic_name__ = downstream_dict[f'''model.framelevel_feature_extractor.module.{i}.kernel.bias'''] __magic_name__ = downstream_dict["""model.utterancelevel_feature_extractor.linear1.weight"""] __magic_name__ = downstream_dict["""model.utterancelevel_feature_extractor.linear1.bias"""] __magic_name__ = downstream_dict["""model.utterancelevel_feature_extractor.linear2.weight"""] __magic_name__ = downstream_dict["""model.utterancelevel_feature_extractor.linear2.bias"""] __magic_name__ = downstream_dict["""objective.W"""] return model @torch.no_grad() def a__ ( A_, A_, A_, A_ ): '''simple docstring''' __magic_name__ = torch.load(A_, map_location="""cpu""" ) __magic_name__ = checkpoint["""Downstream"""] __magic_name__ = WavaVecaConfig.from_pretrained(A_ ) __magic_name__ = WavaVecaFeatureExtractor.from_pretrained( A_, return_attention_mask=A_, do_normalize=A_ ) __magic_name__ = hf_config.architectures[0] if arch.endswith("""ForSequenceClassification""" ): __magic_name__ = convert_classification(A_, A_, A_ ) elif arch.endswith("""ForAudioFrameClassification""" ): __magic_name__ = convert_diarization(A_, A_, A_ ) elif arch.endswith("""ForXVector""" ): __magic_name__ = convert_xvector(A_, A_, A_ ) else: raise NotImplementedError(f'''S3PRL weights conversion is not supported for {arch}''' ) if hf_config.use_weighted_layer_sum: __magic_name__ = checkpoint["""Featurizer"""]["""weights"""] hf_feature_extractor.save_pretrained(A_ ) hf_model.save_pretrained(A_ ) if __name__ == "__main__": __lowerCAmelCase : Optional[Any] = argparse.ArgumentParser() parser.add_argument( '--base_model_name', default=None, type=str, help='Name of the huggingface pretrained base model.' ) parser.add_argument('--config_path', default=None, type=str, help='Path to the huggingface classifier config.') parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to the s3prl checkpoint.') parser.add_argument('--model_dump_path', default=None, type=str, help='Path to the final converted model.') __lowerCAmelCase : str = parser.parse_args() convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
76
0
from ...configuration_utils import PretrainedConfig __lowerCAmelCase : List[Any] = { 'google/tapas-base-finetuned-sqa': ( 'https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json' ), 'google/tapas-base-finetuned-wtq': ( 'https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json' ), 'google/tapas-base-finetuned-wikisql-supervised': ( 'https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json' ), 'google/tapas-base-finetuned-tabfact': ( 'https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json' ), } class UpperCAmelCase_ ( __A ): '''simple docstring''' a__ = """tapas""" def __init__( self : Union[str, Any] , UpperCamelCase__ : Union[str, Any]=3_0522 , UpperCamelCase__ : Tuple=768 , UpperCamelCase__ : Tuple=12 , UpperCamelCase__ : Dict=12 , UpperCamelCase__ : List[str]=3072 , UpperCamelCase__ : Any="gelu" , UpperCamelCase__ : Tuple=0.1 , UpperCamelCase__ : int=0.1 , UpperCamelCase__ : List[str]=1024 , UpperCamelCase__ : Any=[3, 256, 256, 2, 256, 256, 10] , UpperCamelCase__ : str=0.02 , UpperCamelCase__ : Any=1E-12 , UpperCamelCase__ : str=0 , UpperCamelCase__ : Tuple=10.0 , UpperCamelCase__ : Tuple=0 , UpperCamelCase__ : List[str]=1.0 , UpperCamelCase__ : Dict=None , UpperCamelCase__ : Any=1.0 , UpperCamelCase__ : int=False , UpperCamelCase__ : Optional[Any]=None , UpperCamelCase__ : Dict=1.0 , UpperCamelCase__ : Union[str, Any]=1.0 , UpperCamelCase__ : Dict=False , UpperCamelCase__ : Dict=False , UpperCamelCase__ : Optional[Any]="ratio" , UpperCamelCase__ : List[Any]=None , UpperCamelCase__ : Tuple=None , UpperCamelCase__ : List[Any]=64 , UpperCamelCase__ : Union[str, Any]=32 , UpperCamelCase__ : Union[str, Any]=False , UpperCamelCase__ : List[Any]=True , UpperCamelCase__ : Dict=False , UpperCamelCase__ : Optional[int]=False , UpperCamelCase__ : Union[str, Any]=True , UpperCamelCase__ : Union[str, Any]=False , UpperCamelCase__ : Tuple=None , UpperCamelCase__ : Optional[int]=None , **UpperCamelCase__ : Tuple , ) -> List[str]: """simple docstring""" super().__init__(pad_token_id=UpperCamelCase__ , **UpperCamelCase__ ) # BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes) __magic_name__ = vocab_size __magic_name__ = hidden_size __magic_name__ = num_hidden_layers __magic_name__ = num_attention_heads __magic_name__ = hidden_act __magic_name__ = intermediate_size __magic_name__ = hidden_dropout_prob __magic_name__ = attention_probs_dropout_prob __magic_name__ = max_position_embeddings __magic_name__ = type_vocab_sizes __magic_name__ = initializer_range __magic_name__ = layer_norm_eps # Fine-tuning task hyperparameters __magic_name__ = positive_label_weight __magic_name__ = num_aggregation_labels __magic_name__ = aggregation_loss_weight __magic_name__ = use_answer_as_supervision __magic_name__ = answer_loss_importance __magic_name__ = use_normalized_answer_loss __magic_name__ = huber_loss_delta __magic_name__ = temperature __magic_name__ = aggregation_temperature __magic_name__ = use_gumbel_for_cells __magic_name__ = use_gumbel_for_aggregation __magic_name__ = average_approximation_function __magic_name__ = cell_selection_preference __magic_name__ = answer_loss_cutoff __magic_name__ = max_num_rows __magic_name__ = max_num_columns __magic_name__ = average_logits_per_cell __magic_name__ = select_one_column __magic_name__ = allow_empty_column_selection __magic_name__ = init_cell_selection_weights_to_zero __magic_name__ = reset_position_index_per_cell __magic_name__ = disable_per_token_loss # Aggregation hyperparameters __magic_name__ = aggregation_labels __magic_name__ = no_aggregation_label_index if isinstance(self.aggregation_labels , UpperCamelCase__ ): __magic_name__ = {int(UpperCamelCase__ ): v for k, v in aggregation_labels.items()}
704
import pytest from datasets import Dataset, DatasetDict, Features, NamedSplit, Value from datasets.io.text import TextDatasetReader from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases def a__ ( A_, A_ ): '''simple docstring''' assert isinstance(A_, A_ ) assert dataset.num_rows == 4 assert dataset.num_columns == 1 assert dataset.column_names == ["text"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("""keep_in_memory""", [False, True] ) def a__ ( A_, A_, A_ ): '''simple docstring''' __magic_name__ = tmp_path / """cache""" __magic_name__ = {"""text""": """string"""} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): __magic_name__ = TextDatasetReader(A_, cache_dir=A_, keep_in_memory=A_ ).read() _check_text_dataset(A_, A_ ) @pytest.mark.parametrize( """features""", [ None, {"""text""": """string"""}, {"""text""": """int32"""}, {"""text""": """float32"""}, ], ) def a__ ( A_, A_, A_ ): '''simple docstring''' __magic_name__ = tmp_path / """cache""" __magic_name__ = {"""text""": """string"""} __magic_name__ = features.copy() if features else default_expected_features __magic_name__ = ( Features({feature: Value(A_ ) for feature, dtype in features.items()} ) if features is not None else None ) __magic_name__ = TextDatasetReader(A_, features=A_, cache_dir=A_ ).read() _check_text_dataset(A_, A_ ) @pytest.mark.parametrize("""split""", [None, NamedSplit("""train""" ), """train""", """test"""] ) def a__ ( A_, A_, A_ ): '''simple docstring''' __magic_name__ = tmp_path / """cache""" __magic_name__ = {"""text""": """string"""} __magic_name__ = TextDatasetReader(A_, cache_dir=A_, split=A_ ).read() _check_text_dataset(A_, A_ ) assert dataset.split == split if split else "train" @pytest.mark.parametrize("""path_type""", [str, list] ) def a__ ( A_, A_, A_ ): '''simple docstring''' if issubclass(A_, A_ ): __magic_name__ = text_path elif issubclass(A_, A_ ): __magic_name__ = [text_path] __magic_name__ = tmp_path / """cache""" __magic_name__ = {"""text""": """string"""} __magic_name__ = TextDatasetReader(A_, cache_dir=A_ ).read() _check_text_dataset(A_, A_ ) def a__ ( A_, A_, A_=("train",) ): '''simple docstring''' assert isinstance(A_, A_ ) for split in splits: __magic_name__ = dataset_dict[split] assert dataset.num_rows == 4 assert dataset.num_columns == 1 assert dataset.column_names == ["text"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("""keep_in_memory""", [False, True] ) def a__ ( A_, A_, A_ ): '''simple docstring''' __magic_name__ = tmp_path / """cache""" __magic_name__ = {"""text""": """string"""} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): __magic_name__ = TextDatasetReader({"""train""": text_path}, cache_dir=A_, keep_in_memory=A_ ).read() _check_text_datasetdict(A_, A_ ) @pytest.mark.parametrize( """features""", [ None, {"""text""": """string"""}, {"""text""": """int32"""}, {"""text""": """float32"""}, ], ) def a__ ( A_, A_, A_ ): '''simple docstring''' __magic_name__ = tmp_path / """cache""" # CSV file loses col_1 string dtype information: default now is "int64" instead of "string" __magic_name__ = {"""text""": """string"""} __magic_name__ = features.copy() if features else default_expected_features __magic_name__ = ( Features({feature: Value(A_ ) for feature, dtype in features.items()} ) if features is not None else None ) __magic_name__ = TextDatasetReader({"""train""": text_path}, features=A_, cache_dir=A_ ).read() _check_text_datasetdict(A_, A_ ) @pytest.mark.parametrize("""split""", [None, NamedSplit("""train""" ), """train""", """test"""] ) def a__ ( A_, A_, A_ ): '''simple docstring''' if split: __magic_name__ = {split: text_path} else: __magic_name__ = """train""" __magic_name__ = {"""train""": text_path, """test""": text_path} __magic_name__ = tmp_path / """cache""" __magic_name__ = {"""text""": """string"""} __magic_name__ = TextDatasetReader(A_, cache_dir=A_ ).read() _check_text_datasetdict(A_, A_, splits=list(path.keys() ) ) assert all(dataset[split].split == split for split in path.keys() )
76
0
from collections.abc import Callable from math import pi, sqrt from random import uniform from statistics import mean def a__ ( A_ ): '''simple docstring''' def is_in_circle(A_, A_ ) -> bool: __magic_name__ = sqrt((x**2) + (y**2) ) # Our circle has a radius of 1, so a distance # greater than 1 would land outside the circle. return distance_from_centre <= 1 # The proportion of guesses that landed in the circle __magic_name__ = mean( int(is_in_circle(uniform(-1.0, 1.0 ), uniform(-1.0, 1.0 ) ) ) for _ in range(__snake_case ) ) # The ratio of the area for circle to square is pi/4. __magic_name__ = proportion * 4 print(f'''The estimated value of pi is {pi_estimate}''' ) print(f'''The numpy value of pi is {pi}''' ) print(f'''The total error is {abs(pi - pi_estimate )}''' ) def a__ ( A_, A_, A_ = 0.0, A_ = 1.0, ): '''simple docstring''' return mean( function_to_integrate(uniform(__snake_case, __snake_case ) ) for _ in range(__snake_case ) ) * (max_value - min_value) def a__ ( A_, A_ = 0.0, A_ = 1.0 ): '''simple docstring''' def identity_function(A_ ) -> float: return x __magic_name__ = area_under_curve_estimator( __snake_case, __snake_case, __snake_case, __snake_case ) __magic_name__ = (max_value * max_value - min_value * min_value) / 2 print("""******************""" ) print(f'''Estimating area under y=x where x varies from {min_value} to {max_value}''' ) print(f'''Estimated value is {estimated_value}''' ) print(f'''Expected value is {expected_value}''' ) print(f'''Total error is {abs(estimated_value - expected_value )}''' ) print("""******************""" ) def a__ ( A_ ): '''simple docstring''' def function_to_integrate(A_ ) -> float: return sqrt(4.0 - x * x ) __magic_name__ = area_under_curve_estimator( __snake_case, __snake_case, 0.0, 2.0 ) print("""******************""" ) print("""Estimating pi using area_under_curve_estimator""" ) print(f'''Estimated value is {estimated_value}''' ) print(f'''Expected value is {pi}''' ) print(f'''Total error is {abs(estimated_value - pi )}''' ) print("""******************""" ) if __name__ == "__main__": import doctest doctest.testmod()
705
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, logging __lowerCAmelCase : Union[str, Any] = logging.get_logger(__name__) class UpperCAmelCase_ ( _A ): '''simple docstring''' a__ = ["""pixel_values"""] def __init__( self : str , UpperCamelCase__ : bool = True , UpperCamelCase__ : Optional[Dict[str, int]] = None , UpperCamelCase__ : PILImageResampling = PILImageResampling.BILINEAR , UpperCamelCase__ : bool = True , UpperCamelCase__ : Dict[str, int] = None , UpperCamelCase__ : bool = True , UpperCamelCase__ : Union[int, float] = 1 / 255 , UpperCamelCase__ : bool = True , UpperCamelCase__ : Optional[Union[float, List[float]]] = None , UpperCamelCase__ : Optional[Union[float, List[float]]] = None , **UpperCamelCase__ : List[Any] , ) -> None: """simple docstring""" super().__init__(**UpperCamelCase__ ) __magic_name__ = size if size is not None else {"""shortest_edge""": 256} __magic_name__ = get_size_dict(UpperCamelCase__ , default_to_square=UpperCamelCase__ ) __magic_name__ = crop_size if crop_size is not None else {"""height""": 224, """width""": 224} __magic_name__ = get_size_dict(UpperCamelCase__ ) __magic_name__ = do_resize __magic_name__ = size __magic_name__ = resample __magic_name__ = do_center_crop __magic_name__ = crop_size __magic_name__ = do_rescale __magic_name__ = rescale_factor __magic_name__ = do_normalize __magic_name__ = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN __magic_name__ = image_std if image_std is not None else IMAGENET_STANDARD_STD def _lowercase ( self : Any , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : Dict[str, int] , UpperCamelCase__ : PILImageResampling = PILImageResampling.BICUBIC , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : Tuple , ) -> np.ndarray: """simple docstring""" __magic_name__ = get_size_dict(UpperCamelCase__ , default_to_square=UpperCamelCase__ ) if "shortest_edge" not in size: raise ValueError(F'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''' ) __magic_name__ = get_resize_output_image_size(UpperCamelCase__ , size=size["""shortest_edge"""] , default_to_square=UpperCamelCase__ ) return resize(UpperCamelCase__ , size=UpperCamelCase__ , resample=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__ ) def _lowercase ( self : str , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : Dict[str, int] , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : str , ) -> np.ndarray: """simple docstring""" __magic_name__ = get_size_dict(UpperCamelCase__ ) return center_crop(UpperCamelCase__ , size=(size["""height"""], size["""width"""]) , data_format=UpperCamelCase__ , **UpperCamelCase__ ) def _lowercase ( self : Tuple , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : float , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : Any ) -> np.ndarray: """simple docstring""" return rescale(UpperCamelCase__ , scale=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__ ) def _lowercase ( self : List[str] , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : Union[float, List[float]] , UpperCamelCase__ : Union[float, List[float]] , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : str , ) -> np.ndarray: """simple docstring""" return normalize(UpperCamelCase__ , mean=UpperCamelCase__ , std=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__ ) def _lowercase ( self : Optional[Any] , UpperCamelCase__ : ImageInput , UpperCamelCase__ : Optional[bool] = None , UpperCamelCase__ : Dict[str, int] = None , UpperCamelCase__ : PILImageResampling = None , UpperCamelCase__ : bool = None , UpperCamelCase__ : Dict[str, int] = None , UpperCamelCase__ : Optional[bool] = None , UpperCamelCase__ : Optional[float] = None , UpperCamelCase__ : Optional[bool] = None , UpperCamelCase__ : Optional[Union[float, List[float]]] = None , UpperCamelCase__ : Optional[Union[float, List[float]]] = None , UpperCamelCase__ : Optional[Union[str, TensorType]] = None , UpperCamelCase__ : Union[str, ChannelDimension] = ChannelDimension.FIRST , **UpperCamelCase__ : int , ) -> Dict: """simple docstring""" __magic_name__ = do_resize if do_resize is not None else self.do_resize __magic_name__ = size if size is not None else self.size __magic_name__ = get_size_dict(UpperCamelCase__ , default_to_square=UpperCamelCase__ ) __magic_name__ = resample if resample is not None else self.resample __magic_name__ = do_center_crop if do_center_crop is not None else self.do_center_crop __magic_name__ = crop_size if crop_size is not None else self.crop_size __magic_name__ = get_size_dict(UpperCamelCase__ ) __magic_name__ = do_rescale if do_rescale is not None else self.do_rescale __magic_name__ = rescale_factor if rescale_factor is not None else self.rescale_factor __magic_name__ = do_normalize if do_normalize is not None else self.do_normalize __magic_name__ = image_mean if image_mean is not None else self.image_mean __magic_name__ = image_std if image_std is not None else self.image_std __magic_name__ = make_list_of_images(UpperCamelCase__ ) if not valid_images(UpperCamelCase__ ): raise ValueError( """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """ """torch.Tensor, tf.Tensor or jax.ndarray.""" ) if do_resize and size is None: raise ValueError("""Size must be specified if do_resize is True.""" ) if do_center_crop and crop_size is None: raise ValueError("""Crop size must be specified if do_center_crop is True.""" ) if do_rescale and rescale_factor is None: raise ValueError("""Rescale factor must be specified if do_rescale is True.""" ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("""Image mean and std must be specified if do_normalize is True.""" ) # All transformations expect numpy arrays. __magic_name__ = [to_numpy_array(UpperCamelCase__ ) for image in images] if do_resize: __magic_name__ = [self.resize(image=UpperCamelCase__ , size=UpperCamelCase__ , resample=UpperCamelCase__ ) for image in images] if do_center_crop: __magic_name__ = [self.center_crop(image=UpperCamelCase__ , size=UpperCamelCase__ ) for image in images] if do_rescale: __magic_name__ = [self.rescale(image=UpperCamelCase__ , scale=UpperCamelCase__ ) for image in images] if do_normalize: __magic_name__ = [self.normalize(image=UpperCamelCase__ , mean=UpperCamelCase__ , std=UpperCamelCase__ ) for image in images] __magic_name__ = [to_channel_dimension_format(UpperCamelCase__ , UpperCamelCase__ ) for image in images] __magic_name__ = {"""pixel_values""": images} return BatchFeature(data=UpperCamelCase__ , tensor_type=UpperCamelCase__ )
76
0
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available __lowerCAmelCase : str = {'''configuration_timm_backbone''': ['''TimmBackboneConfig''']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCAmelCase : List[Any] = ['''TimmBackbone'''] if TYPE_CHECKING: from .configuration_timm_backbone import TimmBackboneConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_timm_backbone import TimmBackbone else: import sys __lowerCAmelCase : Any = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
706
import math def a__ ( A_, A_ = 0, A_ = 0 ): '''simple docstring''' __magic_name__ = end or len(A_ ) for i in range(A_, A_ ): __magic_name__ = i __magic_name__ = array[i] while temp_index != start and temp_index_value < array[temp_index - 1]: __magic_name__ = array[temp_index - 1] temp_index -= 1 __magic_name__ = temp_index_value return array def a__ ( A_, A_, A_ ): # Max Heap '''simple docstring''' __magic_name__ = index __magic_name__ = 2 * index + 1 # Left Node __magic_name__ = 2 * index + 2 # Right Node if left_index < heap_size and array[largest] < array[left_index]: __magic_name__ = left_index if right_index < heap_size and array[largest] < array[right_index]: __magic_name__ = right_index if largest != index: __magic_name__ , __magic_name__ = array[largest], array[index] heapify(A_, A_, A_ ) def a__ ( A_ ): '''simple docstring''' __magic_name__ = len(A_ ) for i in range(n // 2, -1, -1 ): heapify(A_, A_, A_ ) for i in range(n - 1, 0, -1 ): __magic_name__ , __magic_name__ = array[0], array[i] heapify(A_, 0, A_ ) return array def a__ ( A_, A_, A_, A_ ): '''simple docstring''' if (array[first_index] > array[middle_index]) != ( array[first_index] > array[last_index] ): return array[first_index] elif (array[middle_index] > array[first_index]) != ( array[middle_index] > array[last_index] ): return array[middle_index] else: return array[last_index] def a__ ( A_, A_, A_, A_ ): '''simple docstring''' __magic_name__ = low __magic_name__ = high while True: while array[i] < pivot: i += 1 j -= 1 while pivot < array[j]: j -= 1 if i >= j: return i __magic_name__ , __magic_name__ = array[j], array[i] i += 1 def a__ ( A_ ): '''simple docstring''' if len(A_ ) == 0: return array __magic_name__ = 2 * math.ceil(math.loga(len(A_ ) ) ) __magic_name__ = 16 return intro_sort(A_, 0, len(A_ ), A_, A_ ) def a__ ( A_, A_, A_, A_, A_ ): '''simple docstring''' while end - start > size_threshold: if max_depth == 0: return heap_sort(A_ ) max_depth -= 1 __magic_name__ = median_of_a(A_, A_, start + ((end - start) // 2) + 1, end - 1 ) __magic_name__ = partition(A_, A_, A_, A_ ) intro_sort(A_, A_, A_, A_, A_ ) __magic_name__ = p return insertion_sort(A_, A_, A_ ) if __name__ == "__main__": import doctest doctest.testmod() __lowerCAmelCase : str = input('Enter numbers separated by a comma : ').strip() __lowerCAmelCase : List[Any] = [float(item) for item in user_input.split(',')] print(sort(unsorted))
76
0
import argparse import os from io import BytesIO from pathlib import Path import requests from clip_retrieval.clip_client import ClipClient from PIL import Image from tqdm import tqdm def a__ ( A_, A_, A_ ): '''simple docstring''' __magic_name__ = 1.5 __magic_name__ = int(factor * num_class_images ) __magic_name__ = ClipClient( url="""https://knn.laion.ai/knn-service""", indice_name="""laion_400m""", num_images=lowerCAmelCase_, aesthetic_weight=0.1 ) os.makedirs(f'''{class_data_dir}/images''', exist_ok=lowerCAmelCase_ ) if len(list(Path(f'''{class_data_dir}/images''' ).iterdir() ) ) >= num_class_images: return while True: __magic_name__ = client.query(text=lowerCAmelCase_ ) if len(lowerCAmelCase_ ) >= factor * num_class_images or num_images > 1e4: break else: __magic_name__ = int(factor * num_images ) __magic_name__ = ClipClient( url="""https://knn.laion.ai/knn-service""", indice_name="""laion_400m""", num_images=lowerCAmelCase_, aesthetic_weight=0.1, ) __magic_name__ = 0 __magic_name__ = 0 __magic_name__ = tqdm(desc="""downloading real regularization images""", total=lowerCAmelCase_ ) with open(f'''{class_data_dir}/caption.txt''', """w""" ) as fa, open(f'''{class_data_dir}/urls.txt''', """w""" ) as fa, open( f'''{class_data_dir}/images.txt''', """w""" ) as fa: while total < num_class_images: __magic_name__ = class_images[count] count += 1 try: __magic_name__ = requests.get(images["""url"""] ) if img.status_code == 200: __magic_name__ = Image.open(BytesIO(img.content ) ) with open(f'''{class_data_dir}/images/{total}.jpg''', """wb""" ) as f: f.write(img.content ) fa.write(images["""caption"""] + """\n""" ) fa.write(images["""url"""] + """\n""" ) fa.write(f'''{class_data_dir}/images/{total}.jpg''' + """\n""" ) total += 1 pbar.update(1 ) else: continue except Exception: continue return def a__ ( ): '''simple docstring''' __magic_name__ = argparse.ArgumentParser("""""", add_help=lowerCAmelCase_ ) parser.add_argument("""--class_prompt""", help="""text prompt to retrieve images""", required=lowerCAmelCase_, type=lowerCAmelCase_ ) parser.add_argument("""--class_data_dir""", help="""path to save images""", required=lowerCAmelCase_, type=lowerCAmelCase_ ) parser.add_argument("""--num_class_images""", help="""number of images to download""", default=200, type=lowerCAmelCase_ ) return parser.parse_args() if __name__ == "__main__": __lowerCAmelCase : Dict = parse_args() retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
707
import argparse import json import re from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( MobileNetVaConfig, MobileNetVaForImageClassification, MobileNetVaImageProcessor, load_tf_weights_in_mobilenet_va, ) from transformers.utils import logging logging.set_verbosity_info() __lowerCAmelCase : str = logging.get_logger(__name__) def a__ ( A_ ): '''simple docstring''' __magic_name__ = MobileNetVaConfig(layer_norm_eps=0.001 ) if "_quant" in model_name: raise ValueError("""Quantized models are not supported.""" ) __magic_name__ = re.match(R"""^mobilenet_v1_([^_]*)_([^_]*)$""", A_ ) if matches: __magic_name__ = float(matches[1] ) __magic_name__ = int(matches[2] ) # The TensorFlow version of MobileNetV1 predicts 1001 classes instead of # the usual 1000. The first class (index 0) is "background". __magic_name__ = 1001 __magic_name__ = """imagenet-1k-id2label.json""" __magic_name__ = """huggingface/label-files""" __magic_name__ = json.load(open(hf_hub_download(A_, A_, repo_type="""dataset""" ), """r""" ) ) __magic_name__ = {int(A_ ) + 1: v for k, v in idalabel.items()} __magic_name__ = """background""" __magic_name__ = idalabel __magic_name__ = {v: k for k, v in idalabel.items()} return config def a__ ( ): '''simple docstring''' __magic_name__ = """http://images.cocodataset.org/val2017/000000039769.jpg""" __magic_name__ = Image.open(requests.get(A_, stream=A_ ).raw ) return im @torch.no_grad() def a__ ( A_, A_, A_, A_=False ): '''simple docstring''' __magic_name__ = get_mobilenet_va_config(A_ ) # Load 🤗 model __magic_name__ = MobileNetVaForImageClassification(A_ ).eval() # Load weights from TensorFlow checkpoint load_tf_weights_in_mobilenet_va(A_, A_, A_ ) # Check outputs on an image, prepared by MobileNetV1ImageProcessor __magic_name__ = MobileNetVaImageProcessor( crop_size={"""width""": config.image_size, """height""": config.image_size}, size={"""shortest_edge""": config.image_size + 32}, ) __magic_name__ = image_processor(images=prepare_img(), return_tensors="""pt""" ) __magic_name__ = model(**A_ ) __magic_name__ = outputs.logits assert logits.shape == (1, 1001) if model_name == "mobilenet_v1_1.0_224": __magic_name__ = torch.tensor([-4.1739, -1.1233, 3.1205] ) elif model_name == "mobilenet_v1_0.75_192": __magic_name__ = torch.tensor([-3.9440, -2.3141, -0.3333] ) else: __magic_name__ = None if expected_logits is not None: assert torch.allclose(logits[0, :3], A_, atol=1e-4 ) Path(A_ ).mkdir(exist_ok=A_ ) print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(A_ ) print(f'''Saving image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(A_ ) if push_to_hub: print("""Pushing to the hub...""" ) __magic_name__ = """google/""" + model_name image_processor.push_to_hub(A_ ) model.push_to_hub(A_ ) if __name__ == "__main__": __lowerCAmelCase : str = argparse.ArgumentParser() # Required parameters parser.add_argument( '--model_name', default='mobilenet_v1_1.0_224', type=str, help='Name of the MobileNetV1 model you\'d like to convert. Should in the form \'mobilenet_v1_<depth>_<size>\'.', ) parser.add_argument( '--checkpoint_path', required=True, type=str, help='Path to the original TensorFlow checkpoint (.ckpt file).' ) parser.add_argument( '--pytorch_dump_folder_path', required=True, type=str, help='Path to the output PyTorch model directory.' ) parser.add_argument( '--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.' ) __lowerCAmelCase : str = parser.parse_args() convert_movilevit_checkpoint( args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub )
76
0
import argparse import json from pathlib import Path import torch import torchaudio from datasets import load_dataset from huggingface_hub import hf_hub_download from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification from transformers.utils import logging logging.set_verbosity_info() __lowerCAmelCase : Tuple = logging.get_logger(__name__) def a__ ( A_ ): '''simple docstring''' __magic_name__ = ASTConfig() if "10-10" in model_name: pass elif "speech-commands" in model_name: __magic_name__ = 128 elif "12-12" in model_name: __magic_name__ = 12 __magic_name__ = 12 elif "14-14" in model_name: __magic_name__ = 14 __magic_name__ = 14 elif "16-16" in model_name: __magic_name__ = 16 __magic_name__ = 16 else: raise ValueError("""Model not supported""" ) __magic_name__ = """huggingface/label-files""" if "speech-commands" in model_name: __magic_name__ = 35 __magic_name__ = """speech-commands-v2-id2label.json""" else: __magic_name__ = 527 __magic_name__ = """audioset-id2label.json""" __magic_name__ = json.load(open(hf_hub_download(_snake_case, _snake_case, repo_type="""dataset""" ), """r""" ) ) __magic_name__ = {int(_snake_case ): v for k, v in idalabel.items()} __magic_name__ = idalabel __magic_name__ = {v: k for k, v in idalabel.items()} return config def a__ ( A_ ): '''simple docstring''' if "module.v" in name: __magic_name__ = name.replace("""module.v""", """audio_spectrogram_transformer""" ) if "cls_token" in name: __magic_name__ = name.replace("""cls_token""", """embeddings.cls_token""" ) if "dist_token" in name: __magic_name__ = name.replace("""dist_token""", """embeddings.distillation_token""" ) if "pos_embed" in name: __magic_name__ = name.replace("""pos_embed""", """embeddings.position_embeddings""" ) if "patch_embed.proj" in name: __magic_name__ = name.replace("""patch_embed.proj""", """embeddings.patch_embeddings.projection""" ) # transformer blocks if "blocks" in name: __magic_name__ = name.replace("""blocks""", """encoder.layer""" ) if "attn.proj" in name: __magic_name__ = name.replace("""attn.proj""", """attention.output.dense""" ) if "attn" in name: __magic_name__ = name.replace("""attn""", """attention.self""" ) if "norm1" in name: __magic_name__ = name.replace("""norm1""", """layernorm_before""" ) if "norm2" in name: __magic_name__ = name.replace("""norm2""", """layernorm_after""" ) if "mlp.fc1" in name: __magic_name__ = name.replace("""mlp.fc1""", """intermediate.dense""" ) if "mlp.fc2" in name: __magic_name__ = name.replace("""mlp.fc2""", """output.dense""" ) # final layernorm if "audio_spectrogram_transformer.norm" in name: __magic_name__ = name.replace("""audio_spectrogram_transformer.norm""", """audio_spectrogram_transformer.layernorm""" ) # classifier head if "module.mlp_head.0" in name: __magic_name__ = name.replace("""module.mlp_head.0""", """classifier.layernorm""" ) if "module.mlp_head.1" in name: __magic_name__ = name.replace("""module.mlp_head.1""", """classifier.dense""" ) return name def a__ ( A_, A_ ): '''simple docstring''' for key in orig_state_dict.copy().keys(): __magic_name__ = orig_state_dict.pop(_snake_case ) if "qkv" in key: __magic_name__ = key.split(""".""" ) __magic_name__ = int(key_split[3] ) __magic_name__ = config.hidden_size if "weight" in key: __magic_name__ = val[:dim, :] __magic_name__ = val[dim : dim * 2, :] __magic_name__ = val[-dim:, :] else: __magic_name__ = val[:dim] __magic_name__ = val[dim : dim * 2] __magic_name__ = val[-dim:] else: __magic_name__ = val return orig_state_dict def a__ ( A_ ): '''simple docstring''' __magic_name__ = [ """module.v.head.weight""", """module.v.head.bias""", """module.v.head_dist.weight""", """module.v.head_dist.bias""", ] for k in ignore_keys: state_dict.pop(_snake_case, _snake_case ) @torch.no_grad() def a__ ( A_, A_, A_=False ): '''simple docstring''' __magic_name__ = get_audio_spectrogram_transformer_config(_snake_case ) __magic_name__ = { """ast-finetuned-audioset-10-10-0.4593""": ( """https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1""" ), """ast-finetuned-audioset-10-10-0.450""": ( """https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1""" ), """ast-finetuned-audioset-10-10-0.448""": ( """https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1""" ), """ast-finetuned-audioset-10-10-0.448-v2""": ( """https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1""" ), """ast-finetuned-audioset-12-12-0.447""": ( """https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1""" ), """ast-finetuned-audioset-14-14-0.443""": ( """https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1""" ), """ast-finetuned-audioset-16-16-0.442""": ( """https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1""" ), """ast-finetuned-speech-commands-v2""": ( """https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1""" ), } # load original state_dict __magic_name__ = model_name_to_url[model_name] __magic_name__ = torch.hub.load_state_dict_from_url(_snake_case, map_location="""cpu""" ) # remove some keys remove_keys(_snake_case ) # rename some keys __magic_name__ = convert_state_dict(_snake_case, _snake_case ) # load 🤗 model __magic_name__ = ASTForAudioClassification(_snake_case ) model.eval() model.load_state_dict(_snake_case ) # verify outputs on dummy input # source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62 __magic_name__ = -4.2677393 if """speech-commands""" not in model_name else -6.845978 __magic_name__ = 4.5689974 if """speech-commands""" not in model_name else 5.5654526 __magic_name__ = 1024 if """speech-commands""" not in model_name else 128 __magic_name__ = ASTFeatureExtractor(mean=_snake_case, std=_snake_case, max_length=_snake_case ) if "speech-commands" in model_name: __magic_name__ = load_dataset("""speech_commands""", """v0.02""", split="""validation""" ) __magic_name__ = dataset[0]["""audio"""]["""array"""] else: __magic_name__ = hf_hub_download( repo_id="""nielsr/audio-spectogram-transformer-checkpoint""", filename="""sample_audio.flac""", repo_type="""dataset""", ) __magic_name__ , __magic_name__ = torchaudio.load(_snake_case ) __magic_name__ = waveform.squeeze().numpy() __magic_name__ = feature_extractor(_snake_case, sampling_rate=16000, return_tensors="""pt""" ) # forward pass __magic_name__ = model(**_snake_case ) __magic_name__ = outputs.logits if model_name == "ast-finetuned-audioset-10-10-0.4593": __magic_name__ = torch.tensor([-0.8760, -7.0042, -8.6602] ) elif model_name == "ast-finetuned-audioset-10-10-0.450": __magic_name__ = torch.tensor([-1.1986, -7.0903, -8.2718] ) elif model_name == "ast-finetuned-audioset-10-10-0.448": __magic_name__ = torch.tensor([-2.6128, -8.0080, -9.4344] ) elif model_name == "ast-finetuned-audioset-10-10-0.448-v2": __magic_name__ = torch.tensor([-1.5080, -7.4534, -8.8917] ) elif model_name == "ast-finetuned-audioset-12-12-0.447": __magic_name__ = torch.tensor([-0.5050, -6.5833, -8.0843] ) elif model_name == "ast-finetuned-audioset-14-14-0.443": __magic_name__ = torch.tensor([-0.3826, -7.0336, -8.2413] ) elif model_name == "ast-finetuned-audioset-16-16-0.442": __magic_name__ = torch.tensor([-1.2113, -6.9101, -8.3470] ) elif model_name == "ast-finetuned-speech-commands-v2": __magic_name__ = torch.tensor([6.1589, -8.0566, -8.7984] ) else: raise ValueError("""Unknown model name""" ) if not torch.allclose(logits[0, :3], _snake_case, atol=1e-4 ): raise ValueError("""Logits don't match""" ) print("""Looks ok!""" ) if pytorch_dump_folder_path is not None: Path(_snake_case ).mkdir(exist_ok=_snake_case ) print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(_snake_case ) print(f'''Saving feature extractor to {pytorch_dump_folder_path}''' ) feature_extractor.save_pretrained(_snake_case ) if push_to_hub: print("""Pushing model and feature extractor to the hub...""" ) model.push_to_hub(f'''MIT/{model_name}''' ) feature_extractor.push_to_hub(f'''MIT/{model_name}''' ) if __name__ == "__main__": __lowerCAmelCase : Any = argparse.ArgumentParser() # Required parameters parser.add_argument( '--model_name', default='ast-finetuned-audioset-10-10-0.4593', type=str, help='Name of the Audio Spectrogram Transformer model you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.' ) parser.add_argument( '--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.' ) __lowerCAmelCase : List[str] = parser.parse_args() convert_audio_spectrogram_transformer_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
708
import collections import importlib.util import os import re from pathlib import Path __lowerCAmelCase : int = 'src/transformers' # Matches is_xxx_available() __lowerCAmelCase : Optional[int] = re.compile(R'is\_([a-z_]*)_available()') # Catches a one-line _import_struct = {xxx} __lowerCAmelCase : Dict = re.compile(R'^_import_structure\s+=\s+\{([^\}]+)\}') # Catches a line with a key-values pattern: "bla": ["foo", "bar"] __lowerCAmelCase : int = re.compile(R'\s+"\S*":\s+\[([^\]]*)\]') # Catches a line if not is_foo_available __lowerCAmelCase : Optional[Any] = re.compile(R'^\s*if\s+not\s+is\_[a-z_]*\_available\(\)') # Catches a line _import_struct["bla"].append("foo") __lowerCAmelCase : Optional[Any] = re.compile(R'^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)') # Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"] __lowerCAmelCase : Dict = re.compile(R'^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]') # Catches a line with an object between quotes and a comma: "MyModel", __lowerCAmelCase : List[str] = re.compile('^\s+"([^"]+)",') # Catches a line with objects between brackets only: ["foo", "bar"], __lowerCAmelCase : Optional[int] = re.compile('^\s+\[([^\]]+)\]') # Catches a line with from foo import bar, bla, boo __lowerCAmelCase : List[str] = re.compile(R'\s+from\s+\S*\s+import\s+([^\(\s].*)\n') # Catches a line with try: __lowerCAmelCase : int = re.compile(R'^\s*try:') # Catches a line with else: __lowerCAmelCase : Tuple = re.compile(R'^\s*else:') def a__ ( A_ ): '''simple docstring''' if _re_test_backend.search(A_ ) is None: return None __magic_name__ = [b[0] for b in _re_backend.findall(A_ )] backends.sort() return "_and_".join(A_ ) def a__ ( A_ ): '''simple docstring''' with open(A_, """r""", encoding="""utf-8""", newline="""\n""" ) as f: __magic_name__ = f.readlines() __magic_name__ = 0 while line_index < len(A_ ) and not lines[line_index].startswith("""_import_structure = {""" ): line_index += 1 # If this is a traditional init, just return. if line_index >= len(A_ ): return None # First grab the objects without a specific backend in _import_structure __magic_name__ = [] while not lines[line_index].startswith("""if TYPE_CHECKING""" ) and find_backend(lines[line_index] ) is None: __magic_name__ = lines[line_index] # If we have everything on a single line, let's deal with it. if _re_one_line_import_struct.search(A_ ): __magic_name__ = _re_one_line_import_struct.search(A_ ).groups()[0] __magic_name__ = re.findall("""\[([^\]]+)\]""", A_ ) for imp in imports: objects.extend([obj[1:-1] for obj in imp.split(""", """ )] ) line_index += 1 continue __magic_name__ = _re_import_struct_key_value.search(A_ ) if single_line_import_search is not None: __magic_name__ = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(""", """ ) if len(A_ ) > 0] objects.extend(A_ ) elif line.startswith(""" """ * 8 + """\"""" ): objects.append(line[9:-3] ) line_index += 1 __magic_name__ = {"""none""": objects} # Let's continue with backend-specific objects in _import_structure while not lines[line_index].startswith("""if TYPE_CHECKING""" ): # If the line is an if not is_backend_available, we grab all objects associated. __magic_name__ = find_backend(lines[line_index] ) # Check if the backend declaration is inside a try block: if _re_try.search(lines[line_index - 1] ) is None: __magic_name__ = None if backend is not None: line_index += 1 # Scroll until we hit the else block of try-except-else while _re_else.search(lines[line_index] ) is None: line_index += 1 line_index += 1 __magic_name__ = [] # Until we unindent, add backend objects to the list while len(lines[line_index] ) <= 1 or lines[line_index].startswith(""" """ * 4 ): __magic_name__ = lines[line_index] if _re_import_struct_add_one.search(A_ ) is not None: objects.append(_re_import_struct_add_one.search(A_ ).groups()[0] ) elif _re_import_struct_add_many.search(A_ ) is not None: __magic_name__ = _re_import_struct_add_many.search(A_ ).groups()[0].split(""", """ ) __magic_name__ = [obj[1:-1] for obj in imports if len(A_ ) > 0] objects.extend(A_ ) elif _re_between_brackets.search(A_ ) is not None: __magic_name__ = _re_between_brackets.search(A_ ).groups()[0].split(""", """ ) __magic_name__ = [obj[1:-1] for obj in imports if len(A_ ) > 0] objects.extend(A_ ) elif _re_quote_object.search(A_ ) is not None: objects.append(_re_quote_object.search(A_ ).groups()[0] ) elif line.startswith(""" """ * 8 + """\"""" ): objects.append(line[9:-3] ) elif line.startswith(""" """ * 12 + """\"""" ): objects.append(line[13:-3] ) line_index += 1 __magic_name__ = objects else: line_index += 1 # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend __magic_name__ = [] while ( line_index < len(A_ ) and find_backend(lines[line_index] ) is None and not lines[line_index].startswith("""else""" ) ): __magic_name__ = lines[line_index] __magic_name__ = _re_import.search(A_ ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(""", """ ) ) elif line.startswith(""" """ * 8 ): objects.append(line[8:-2] ) line_index += 1 __magic_name__ = {"""none""": objects} # Let's continue with backend-specific objects while line_index < len(A_ ): # If the line is an if is_backend_available, we grab all objects associated. __magic_name__ = find_backend(lines[line_index] ) # Check if the backend declaration is inside a try block: if _re_try.search(lines[line_index - 1] ) is None: __magic_name__ = None if backend is not None: line_index += 1 # Scroll until we hit the else block of try-except-else while _re_else.search(lines[line_index] ) is None: line_index += 1 line_index += 1 __magic_name__ = [] # Until we unindent, add backend objects to the list while len(lines[line_index] ) <= 1 or lines[line_index].startswith(""" """ * 8 ): __magic_name__ = lines[line_index] __magic_name__ = _re_import.search(A_ ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(""", """ ) ) elif line.startswith(""" """ * 12 ): objects.append(line[12:-2] ) line_index += 1 __magic_name__ = objects else: line_index += 1 return import_dict_objects, type_hint_objects def a__ ( A_, A_ ): '''simple docstring''' def find_duplicates(A_ ): return [k for k, v in collections.Counter(A_ ).items() if v > 1] if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ): return ["Both sides of the init do not have the same backends!"] __magic_name__ = [] for key in import_dict_objects.keys(): __magic_name__ = find_duplicates(import_dict_objects[key] ) if duplicate_imports: errors.append(f'''Duplicate _import_structure definitions for: {duplicate_imports}''' ) __magic_name__ = find_duplicates(type_hint_objects[key] ) if duplicate_type_hints: errors.append(f'''Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}''' ) if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ): __magic_name__ = """base imports""" if key == """none""" else f'''{key} backend''' errors.append(f'''Differences for {name}:''' ) for a in type_hint_objects[key]: if a not in import_dict_objects[key]: errors.append(f''' {a} in TYPE_HINT but not in _import_structure.''' ) for a in import_dict_objects[key]: if a not in type_hint_objects[key]: errors.append(f''' {a} in _import_structure but not in TYPE_HINT.''' ) return errors def a__ ( ): '''simple docstring''' __magic_name__ = [] for root, _, files in os.walk(A_ ): if "__init__.py" in files: __magic_name__ = os.path.join(A_, """__init__.py""" ) __magic_name__ = parse_init(A_ ) if objects is not None: __magic_name__ = analyze_results(*A_ ) if len(A_ ) > 0: __magic_name__ = f'''Problem in {fname}, both halves do not define the same objects.\n{errors[0]}''' failures.append("""\n""".join(A_ ) ) if len(A_ ) > 0: raise ValueError("""\n\n""".join(A_ ) ) def a__ ( ): '''simple docstring''' __magic_name__ = [] for path, directories, files in os.walk(A_ ): for folder in directories: # Ignore private modules if folder.startswith("""_""" ): directories.remove(A_ ) continue # Ignore leftovers from branches (empty folders apart from pycache) if len(list((Path(A_ ) / folder).glob("""*.py""" ) ) ) == 0: continue __magic_name__ = str((Path(A_ ) / folder).relative_to(A_ ) ) __magic_name__ = short_path.replace(os.path.sep, """.""" ) submodules.append(A_ ) for fname in files: if fname == "__init__.py": continue __magic_name__ = str((Path(A_ ) / fname).relative_to(A_ ) ) __magic_name__ = short_path.replace(""".py""", """""" ).replace(os.path.sep, """.""" ) if len(submodule.split(""".""" ) ) == 1: submodules.append(A_ ) return submodules __lowerCAmelCase : Dict = [ 'convert_pytorch_checkpoint_to_tf2', 'modeling_flax_pytorch_utils', ] def a__ ( ): '''simple docstring''' __magic_name__ = importlib.util.spec_from_file_location( """transformers""", os.path.join(A_, """__init__.py""" ), submodule_search_locations=[PATH_TO_TRANSFORMERS], ) __magic_name__ = spec.loader.load_module() __magic_name__ = [ module for module in get_transformers_submodules() if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys() ] if len(A_ ) > 0: __magic_name__ = """\n""".join(f'''- {module}''' for module in module_not_registered ) raise ValueError( """The following submodules are not properly registered in the main init of Transformers:\n""" f'''{list_of_modules}\n''' """Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.""" ) if __name__ == "__main__": check_all_inits() check_submodules()
76
0
from typing import List, Optional, Union import numpy as np import PIL.Image from ...image_processing_utils import BaseImageProcessor, BatchFeature from ...image_transforms import rescale, resize, to_channel_dimension_format from ...image_utils import ( ChannelDimension, PILImageResampling, get_image_size, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, logging __lowerCAmelCase : Dict = logging.get_logger(__name__) class UpperCAmelCase_ ( lowerCAmelCase__ ): '''simple docstring''' a__ = ["pixel_values"] def __init__( self : int , UpperCamelCase__ : List[Any] = True , UpperCamelCase__ : Optional[Any] = 32 , UpperCamelCase__ : Optional[Any]=PILImageResampling.BILINEAR , UpperCamelCase__ : List[Any] = True , **UpperCamelCase__ : int , ) -> None: """simple docstring""" __magic_name__ = do_resize __magic_name__ = do_rescale __magic_name__ = size_divisor __magic_name__ = resample super().__init__(**_SCREAMING_SNAKE_CASE ) def _lowercase ( self : int , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Optional[int] = None , **UpperCamelCase__ : str ) -> np.ndarray: """simple docstring""" __magic_name__ , __magic_name__ = get_image_size(_SCREAMING_SNAKE_CASE ) # Rounds the height and width down to the closest multiple of size_divisor __magic_name__ = height // size_divisor * size_divisor __magic_name__ = width // size_divisor * size_divisor __magic_name__ = resize(_SCREAMING_SNAKE_CASE , (new_h, new_w) , resample=_SCREAMING_SNAKE_CASE , data_format=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) return image def _lowercase ( self : Optional[Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Any , UpperCamelCase__ : Optional[Any] = None , **UpperCamelCase__ : List[str] ) -> np.ndarray: """simple docstring""" return rescale(image=_SCREAMING_SNAKE_CASE , scale=_SCREAMING_SNAKE_CASE , data_format=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) def _lowercase ( self : Any , UpperCamelCase__ : str , UpperCamelCase__ : Optional[Any] = None , UpperCamelCase__ : Union[str, Any] = None , UpperCamelCase__ : str=None , UpperCamelCase__ : Any = None , UpperCamelCase__ : List[str] = None , UpperCamelCase__ : Optional[int] = ChannelDimension.FIRST , **UpperCamelCase__ : List[str] , ) -> BatchFeature: """simple docstring""" __magic_name__ = do_resize if do_resize is not None else self.do_resize __magic_name__ = do_rescale if do_rescale is not None else self.do_rescale __magic_name__ = size_divisor if size_divisor is not None else self.size_divisor __magic_name__ = resample if resample is not None else self.resample if do_resize and size_divisor is None: raise ValueError("""size_divisor is required for resizing""" ) __magic_name__ = make_list_of_images(_SCREAMING_SNAKE_CASE ) if not valid_images(_SCREAMING_SNAKE_CASE ): raise ValueError("""Invalid image(s)""" ) # All transformations expect numpy arrays. __magic_name__ = [to_numpy_array(_SCREAMING_SNAKE_CASE ) for img in images] if do_resize: __magic_name__ = [self.resize(_SCREAMING_SNAKE_CASE , size_divisor=_SCREAMING_SNAKE_CASE , resample=_SCREAMING_SNAKE_CASE ) for image in images] if do_rescale: __magic_name__ = [self.rescale(_SCREAMING_SNAKE_CASE , scale=1 / 255 ) for image in images] __magic_name__ = [to_channel_dimension_format(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for image in images] __magic_name__ = {"""pixel_values""": images} return BatchFeature(data=_SCREAMING_SNAKE_CASE , tensor_type=_SCREAMING_SNAKE_CASE )
709
import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging __lowerCAmelCase : Optional[Any] = logging.get_logger(__name__) __lowerCAmelCase : List[Any] = { 'asapp/sew-d-tiny-100k': 'https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json', # See all SEW-D models at https://huggingface.co/models?filter=sew-d } class UpperCAmelCase_ ( _A ): '''simple docstring''' a__ = """sew-d""" def __init__( self : List[str] , UpperCamelCase__ : Tuple=32 , UpperCamelCase__ : Optional[int]=768 , UpperCamelCase__ : Tuple=12 , UpperCamelCase__ : Optional[Any]=12 , UpperCamelCase__ : int=3072 , UpperCamelCase__ : Tuple=2 , UpperCamelCase__ : List[Any]=512 , UpperCamelCase__ : Any=256 , UpperCamelCase__ : Optional[int]=True , UpperCamelCase__ : Dict=True , UpperCamelCase__ : str=("p2c", "c2p") , UpperCamelCase__ : List[Any]="layer_norm" , UpperCamelCase__ : int="gelu_python" , UpperCamelCase__ : Optional[int]=0.1 , UpperCamelCase__ : Tuple=0.1 , UpperCamelCase__ : List[Any]=0.1 , UpperCamelCase__ : int=0.0 , UpperCamelCase__ : Dict=0.1 , UpperCamelCase__ : List[Any]=0.02 , UpperCamelCase__ : Optional[int]=1E-7 , UpperCamelCase__ : List[Any]=1E-5 , UpperCamelCase__ : List[str]="group" , UpperCamelCase__ : Optional[int]="gelu" , UpperCamelCase__ : Tuple=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512) , UpperCamelCase__ : str=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , UpperCamelCase__ : Optional[Any]=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , UpperCamelCase__ : Optional[Any]=False , UpperCamelCase__ : Optional[int]=128 , UpperCamelCase__ : Tuple=16 , UpperCamelCase__ : Optional[int]=True , UpperCamelCase__ : Dict=0.05 , UpperCamelCase__ : str=10 , UpperCamelCase__ : Tuple=2 , UpperCamelCase__ : Dict=0.0 , UpperCamelCase__ : Dict=10 , UpperCamelCase__ : Union[str, Any]=0 , UpperCamelCase__ : List[Any]="mean" , UpperCamelCase__ : int=False , UpperCamelCase__ : Optional[int]=False , UpperCamelCase__ : Optional[int]=256 , UpperCamelCase__ : List[str]=0 , UpperCamelCase__ : Union[str, Any]=1 , UpperCamelCase__ : List[Any]=2 , **UpperCamelCase__ : str , ) -> Dict: """simple docstring""" super().__init__(**UpperCamelCase__ , pad_token_id=UpperCamelCase__ , bos_token_id=UpperCamelCase__ , eos_token_id=UpperCamelCase__ ) __magic_name__ = hidden_size __magic_name__ = feat_extract_norm __magic_name__ = feat_extract_activation __magic_name__ = list(UpperCamelCase__ ) __magic_name__ = list(UpperCamelCase__ ) __magic_name__ = list(UpperCamelCase__ ) __magic_name__ = conv_bias __magic_name__ = num_conv_pos_embeddings __magic_name__ = num_conv_pos_embedding_groups __magic_name__ = len(self.conv_dim ) __magic_name__ = num_hidden_layers __magic_name__ = intermediate_size __magic_name__ = squeeze_factor __magic_name__ = max_position_embeddings __magic_name__ = position_buckets __magic_name__ = share_att_key __magic_name__ = relative_attention __magic_name__ = norm_rel_ebd __magic_name__ = list(UpperCamelCase__ ) __magic_name__ = hidden_act __magic_name__ = num_attention_heads __magic_name__ = hidden_dropout __magic_name__ = attention_dropout __magic_name__ = activation_dropout __magic_name__ = feat_proj_dropout __magic_name__ = final_dropout __magic_name__ = layer_norm_eps __magic_name__ = feature_layer_norm_eps __magic_name__ = initializer_range __magic_name__ = vocab_size if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( """Configuration for convolutional layers is incorrect.""" """It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,""" F'''but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)''' F'''= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' ) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 __magic_name__ = apply_spec_augment __magic_name__ = mask_time_prob __magic_name__ = mask_time_length __magic_name__ = mask_time_min_masks __magic_name__ = mask_feature_prob __magic_name__ = mask_feature_length __magic_name__ = mask_feature_min_masks # ctc loss __magic_name__ = ctc_loss_reduction __magic_name__ = ctc_zero_infinity # sequence classification __magic_name__ = use_weighted_layer_sum __magic_name__ = classifier_proj_size @property def _lowercase ( self : Union[str, Any] ) -> str: """simple docstring""" return functools.reduce(operator.mul , self.conv_stride , 1 )
76
0
import warnings from collections import OrderedDict from typing import Any, Mapping, Optional from ... import PreTrainedTokenizer from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast from ...onnx.utils import compute_effective_axis_dimension from ...utils import TensorType, is_torch_available, logging __lowerCAmelCase : int = logging.get_logger(__name__) __lowerCAmelCase : str = { '''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/config.json''', # See all BART models at https://huggingface.co/models?filter=bart } class UpperCAmelCase_ ( snake_case__ ): '''simple docstring''' a__ = '''bart''' a__ = ['''past_key_values'''] a__ = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''} def __init__( self : List[str] , UpperCamelCase__ : List[str]=5_0265 , UpperCamelCase__ : Any=1024 , UpperCamelCase__ : Any=12 , UpperCamelCase__ : int=4096 , UpperCamelCase__ : List[str]=16 , UpperCamelCase__ : Optional[Any]=12 , UpperCamelCase__ : Optional[int]=4096 , UpperCamelCase__ : Optional[Any]=16 , UpperCamelCase__ : Any=0.0 , UpperCamelCase__ : Optional[int]=0.0 , UpperCamelCase__ : List[str]="gelu" , UpperCamelCase__ : str=1024 , UpperCamelCase__ : Union[str, Any]=0.1 , UpperCamelCase__ : List[Any]=0.0 , UpperCamelCase__ : List[str]=0.0 , UpperCamelCase__ : Optional[int]=0.02 , UpperCamelCase__ : str=0.0 , UpperCamelCase__ : List[str]=False , UpperCamelCase__ : Any=True , UpperCamelCase__ : Dict=3 , UpperCamelCase__ : Optional[int]=1 , UpperCamelCase__ : int=0 , UpperCamelCase__ : Any=2 , UpperCamelCase__ : Tuple=True , UpperCamelCase__ : int=2 , UpperCamelCase__ : Union[str, Any]=2 , **UpperCamelCase__ : int , ) -> Tuple: """simple docstring""" __magic_name__ = vocab_size __magic_name__ = max_position_embeddings __magic_name__ = d_model __magic_name__ = encoder_ffn_dim __magic_name__ = encoder_layers __magic_name__ = encoder_attention_heads __magic_name__ = decoder_ffn_dim __magic_name__ = decoder_layers __magic_name__ = decoder_attention_heads __magic_name__ = dropout __magic_name__ = attention_dropout __magic_name__ = activation_dropout __magic_name__ = activation_function __magic_name__ = init_std __magic_name__ = encoder_layerdrop __magic_name__ = decoder_layerdrop __magic_name__ = classifier_dropout __magic_name__ = use_cache __magic_name__ = encoder_layers __magic_name__ = scale_embedding # scale factor will be sqrt(d_model) if True super().__init__( num_labels=_A , pad_token_id=_A , bos_token_id=_A , eos_token_id=_A , is_encoder_decoder=_A , decoder_start_token_id=_A , forced_eos_token_id=_A , **_A , ) # ensure backward compatibility for BART CNN models if self.forced_bos_token_id is None and kwargs.get("""force_bos_token_to_be_generated""" , _A ): __magic_name__ = self.bos_token_id warnings.warn( F'''Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. ''' """The config can simply be saved and uploaded again to be fixed.""" ) class UpperCAmelCase_ ( snake_case__ ): '''simple docstring''' @property def _lowercase ( self : Optional[Any] ) -> List[Any]: """simple docstring""" if self.task in ["default", "seq2seq-lm"]: __magic_name__ = OrderedDict( [ ("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}), ("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}), ] ) if self.use_past: __magic_name__ = {0: 'batch'} __magic_name__ = {0: 'batch', 1: 'past_decoder_sequence + sequence'} else: __magic_name__ = {0: 'batch', 1: 'decoder_sequence'} __magic_name__ = {0: 'batch', 1: 'decoder_sequence'} if self.use_past: self.fill_with_past_key_values_(_A , direction="""inputs""" ) elif self.task == "causal-lm": # TODO: figure this case out. __magic_name__ = OrderedDict( [ ("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}), ("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}), ] ) if self.use_past: __magic_name__ = self.num_layers for i in range(_A ): __magic_name__ = {0: 'batch', 2: 'past_sequence + sequence'} __magic_name__ = {0: 'batch', 2: 'past_sequence + sequence'} else: __magic_name__ = OrderedDict( [ ("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}), ("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}), ("""decoder_input_ids""", {0: """batch""", 1: """decoder_sequence"""}), ("""decoder_attention_mask""", {0: """batch""", 1: """decoder_sequence"""}), ] ) return common_inputs @property def _lowercase ( self : int ) -> str: """simple docstring""" if self.task in ["default", "seq2seq-lm"]: __magic_name__ = super().outputs else: __magic_name__ = super(_A , self ).outputs if self.use_past: __magic_name__ = self.num_layers for i in range(_A ): __magic_name__ = {0: 'batch', 2: 'past_sequence + sequence'} __magic_name__ = {0: 'batch', 2: 'past_sequence + sequence'} return common_outputs def _lowercase ( self : Any , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Union[str, Any] = -1 , UpperCamelCase__ : int = -1 , UpperCamelCase__ : Optional[Any] = False , UpperCamelCase__ : int = None , ) -> Optional[int]: """simple docstring""" __magic_name__ = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( _A , _A , _A , _A , _A ) # Generate decoder inputs __magic_name__ = seq_length if not self.use_past else 1 __magic_name__ = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( _A , _A , _A , _A , _A ) __magic_name__ = {F'''decoder_{name}''': tensor for name, tensor in decoder_inputs.items()} __magic_name__ = dict(**_A , **_A ) if self.use_past: if not is_torch_available(): raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" ) else: import torch __magic_name__ = common_inputs['input_ids'].shape __magic_name__ = common_inputs['decoder_input_ids'].shape[1] __magic_name__ = self.num_attention_heads __magic_name__ = ( batch, num_encoder_attention_heads, encoder_seq_length, self._config.hidden_size // num_encoder_attention_heads, ) __magic_name__ = decoder_seq_length + 3 __magic_name__ = ( batch, num_decoder_attention_heads, decoder_past_length, self._config.hidden_size // num_decoder_attention_heads, ) __magic_name__ = torch.cat( [common_inputs["""decoder_attention_mask"""], torch.ones(_A , _A )] , dim=1 ) __magic_name__ = [] # If the number of encoder and decoder layers are present in the model configuration, both are considered __magic_name__ = self.num_layers __magic_name__ = min(_A , _A ) __magic_name__ = max(_A , _A ) - min_num_layers __magic_name__ = 'encoder' if num_encoder_layers > num_decoder_layers else 'decoder' for _ in range(_A ): common_inputs["past_key_values"].append( ( torch.zeros(_A ), torch.zeros(_A ), torch.zeros(_A ), torch.zeros(_A ), ) ) # TODO: test this. __magic_name__ = encoder_shape if remaining_side_name == 'encoder' else decoder_shape for _ in range(_A , _A ): common_inputs["past_key_values"].append((torch.zeros(_A ), torch.zeros(_A )) ) return common_inputs def _lowercase ( self : Dict , UpperCamelCase__ : List[str] , UpperCamelCase__ : Union[str, Any] = -1 , UpperCamelCase__ : Dict = -1 , UpperCamelCase__ : Union[str, Any] = False , UpperCamelCase__ : Union[str, Any] = None , ) -> Optional[Any]: """simple docstring""" __magic_name__ = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( _A , _A , _A , _A , _A ) if self.use_past: if not is_torch_available(): raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" ) else: import torch __magic_name__ = common_inputs['input_ids'].shape # Not using the same length for past_key_values __magic_name__ = seqlen + 2 __magic_name__ = self.num_layers __magic_name__ = self.num_attention_heads __magic_name__ = ( batch, num_encoder_attention_heads, past_key_values_length, self._config.hidden_size // num_encoder_attention_heads, ) __magic_name__ = common_inputs['attention_mask'].dtype __magic_name__ = torch.cat( [common_inputs["""attention_mask"""], torch.ones(_A , _A , dtype=_A )] , dim=1 ) __magic_name__ = [ (torch.zeros(_A ), torch.zeros(_A )) for _ in range(_A ) ] return common_inputs def _lowercase ( self : List[str] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Optional[Any] = -1 , UpperCamelCase__ : Optional[int] = -1 , UpperCamelCase__ : Tuple = False , UpperCamelCase__ : int = None , ) -> str: """simple docstring""" __magic_name__ = compute_effective_axis_dimension( _A , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 ) # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX __magic_name__ = tokenizer.num_special_tokens_to_add(_A ) __magic_name__ = compute_effective_axis_dimension( _A , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=_A ) # Generate dummy inputs according to compute batch and sequence __magic_name__ = [' '.join([tokenizer.unk_token] ) * seq_length] * batch_size __magic_name__ = dict(tokenizer(_A , return_tensors=_A ) ) return common_inputs def _lowercase ( self : Union[str, Any] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Optional[Any] = -1 , UpperCamelCase__ : int = -1 , UpperCamelCase__ : str = False , UpperCamelCase__ : Optional[Any] = None , ) -> Any: """simple docstring""" if self.task in ["default", "seq2seq-lm"]: __magic_name__ = self._generate_dummy_inputs_for_default_and_seqaseq_lm( _A , batch_size=_A , seq_length=_A , is_pair=_A , framework=_A ) elif self.task == "causal-lm": __magic_name__ = self._generate_dummy_inputs_for_causal_lm( _A , batch_size=_A , seq_length=_A , is_pair=_A , framework=_A ) else: __magic_name__ = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( _A , batch_size=_A , seq_length=_A , is_pair=_A , framework=_A ) return common_inputs def _lowercase ( self : Dict , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : str , UpperCamelCase__ : str , UpperCamelCase__ : str ) -> List[str]: """simple docstring""" if self.task in ["default", "seq2seq-lm"]: __magic_name__ = super()._flatten_past_key_values_(_A , _A , _A , _A ) else: __magic_name__ = super(_A , self )._flatten_past_key_values_( _A , _A , _A , _A )
710
import math import random def a__ ( A_, A_ = False ): '''simple docstring''' if deriv: return value * (1 - value) return 1 / (1 + math.exp(-value )) # Initial Value __lowerCAmelCase : Union[str, Any] = 0.02 def a__ ( A_, A_ ): '''simple docstring''' __magic_name__ = float(2 * (random.randint(1, 100 )) - 1 ) for _ in range(A_ ): # Forward propagation __magic_name__ = sigmoid_function(INITIAL_VALUE * weight ) # How much did we miss? __magic_name__ = (expected / 100) - layer_a # Error delta __magic_name__ = layer_1_error * sigmoid_function(A_, A_ ) # Update weight weight += INITIAL_VALUE * layer_1_delta return layer_a * 100 if __name__ == "__main__": import doctest doctest.testmod() __lowerCAmelCase : List[Any] = int(input('Expected value: ')) __lowerCAmelCase : Tuple = int(input('Number of propagations: ')) print(forward_propagation(expected, number_propagations))
76
0
import builtins import sys from ...utils.imports import _is_package_available from . import cursor, input from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor from .keymap import KEYMAP __lowerCAmelCase : str = False try: __lowerCAmelCase : Any = _is_package_available('google.colab') except ModuleNotFoundError: pass @input.register class UpperCAmelCase_ : '''simple docstring''' def __init__( self : Dict , UpperCamelCase__ : Any = None , UpperCamelCase__ : Optional[Any] = [] ) -> Optional[int]: """simple docstring""" __magic_name__ = 0 __magic_name__ = choices __magic_name__ = prompt if sys.platform == "win32": __magic_name__ = """*""" else: __magic_name__ = """➔ """ def _lowercase ( self : str , UpperCamelCase__ : Dict , UpperCamelCase__ : str = "" ) -> Tuple: """simple docstring""" if sys.platform != "win32": writeColor(self.choices[index] , 32 , __a ) else: forceWrite(self.choices[index] , __a ) def _lowercase ( self : str , UpperCamelCase__ : List[str] ) -> str: """simple docstring""" if index == self.position: forceWrite(F''' {self.arrow_char} ''' ) self.write_choice(__a ) else: forceWrite(F''' {self.choices[index]}''' ) reset_cursor() def _lowercase ( self : str , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Dict = 1 ) -> List[str]: """simple docstring""" __magic_name__ = self.position if direction == Direction.DOWN: if self.position + 1 >= len(self.choices ): return self.position += num_spaces else: if self.position - 1 < 0: return self.position -= num_spaces clear_line() self.print_choice(__a ) move_cursor(__a , direction.name ) self.print_choice(self.position ) @input.mark(KEYMAP["""up"""] ) def _lowercase ( self : Optional[Any] ) -> List[Any]: """simple docstring""" self.move_direction(Direction.UP ) @input.mark(KEYMAP["""down"""] ) def _lowercase ( self : List[str] ) -> Any: """simple docstring""" self.move_direction(Direction.DOWN ) @input.mark(KEYMAP["""newline"""] ) def _lowercase ( self : Optional[Any] ) -> Optional[Any]: """simple docstring""" move_cursor(len(self.choices ) - self.position , """DOWN""" ) return self.position @input.mark(KEYMAP["""interrupt"""] ) def _lowercase ( self : Dict ) -> str: """simple docstring""" move_cursor(len(self.choices ) - self.position , """DOWN""" ) raise KeyboardInterrupt @input.mark_multiple(*[KEYMAP[str(__a )] for number in range(10 )] ) def _lowercase ( self : Optional[Any] ) -> List[str]: """simple docstring""" __magic_name__ = int(chr(self.current_selection ) ) __magic_name__ = index - self.position if index == self.position: return if index < len(self.choices ): if self.position > index: self.move_direction(Direction.UP , -movement ) elif self.position < index: self.move_direction(Direction.DOWN , __a ) else: return else: return def _lowercase ( self : Dict , UpperCamelCase__ : Union[str, Any] = 0 ) -> Optional[Any]: """simple docstring""" if self.prompt: linebreak() forceWrite(self.prompt , """\n""" ) if in_colab: forceWrite("""Please input a choice index (starting from 0), and press enter""" , """\n""" ) else: forceWrite("""Please select a choice using the arrow or number keys, and selecting with enter""" , """\n""" ) __magic_name__ = default_choice for i in range(len(self.choices ) ): self.print_choice(__a ) forceWrite("""\n""" ) move_cursor(len(self.choices ) - self.position , """UP""" ) with cursor.hide(): while True: if in_colab: try: __magic_name__ = int(builtins.input() ) except ValueError: __magic_name__ = default_choice else: __magic_name__ = self.handle_input() if choice is not None: reset_cursor() for _ in range(len(self.choices ) + 1 ): move_cursor(1 , """UP""" ) clear_line() self.write_choice(__a , """\n""" ) return choice
711
import os import sys __lowerCAmelCase : Optional[Any] = os.path.join(os.path.dirname(__file__), 'src') sys.path.append(SRC_DIR) from transformers import ( AutoConfig, AutoModel, AutoModelForCausalLM, AutoModelForMaskedLM, AutoModelForQuestionAnswering, AutoModelForSequenceClassification, AutoTokenizer, add_start_docstrings, ) __lowerCAmelCase : Union[str, Any] = [ 'torch', 'numpy', 'tokenizers', 'filelock', 'requests', 'tqdm', 'regex', 'sentencepiece', 'sacremoses', 'importlib_metadata', 'huggingface_hub', ] @add_start_docstrings(AutoConfig.__doc__ ) def a__ ( *A_, **A_ ): '''simple docstring''' return AutoConfig.from_pretrained(*A_, **A_ ) @add_start_docstrings(AutoTokenizer.__doc__ ) def a__ ( *A_, **A_ ): '''simple docstring''' return AutoTokenizer.from_pretrained(*A_, **A_ ) @add_start_docstrings(AutoModel.__doc__ ) def a__ ( *A_, **A_ ): '''simple docstring''' return AutoModel.from_pretrained(*A_, **A_ ) @add_start_docstrings(AutoModelForCausalLM.__doc__ ) def a__ ( *A_, **A_ ): '''simple docstring''' return AutoModelForCausalLM.from_pretrained(*A_, **A_ ) @add_start_docstrings(AutoModelForMaskedLM.__doc__ ) def a__ ( *A_, **A_ ): '''simple docstring''' return AutoModelForMaskedLM.from_pretrained(*A_, **A_ ) @add_start_docstrings(AutoModelForSequenceClassification.__doc__ ) def a__ ( *A_, **A_ ): '''simple docstring''' return AutoModelForSequenceClassification.from_pretrained(*A_, **A_ ) @add_start_docstrings(AutoModelForQuestionAnswering.__doc__ ) def a__ ( *A_, **A_ ): '''simple docstring''' return AutoModelForQuestionAnswering.from_pretrained(*A_, **A_ )
76
0
'''simple docstring''' import unittest from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class UpperCAmelCase_ : '''simple docstring''' @staticmethod def _lowercase ( *UpperCamelCase__ : List[str] , **UpperCamelCase__ : List[str] ) -> Union[str, Any]: """simple docstring""" pass @is_pipeline_test @require_vision @require_torch class UpperCAmelCase_ ( unittest.TestCase ): '''simple docstring''' a__ = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING def _lowercase ( self : int , UpperCamelCase__ : str , UpperCamelCase__ : List[str] , UpperCamelCase__ : Union[str, Any] ) -> Union[str, Any]: """simple docstring""" __magic_name__ = pipeline( """zero-shot-object-detection""" , model="""hf-internal-testing/tiny-random-owlvit-object-detection""" ) __magic_name__ = [ { """image""": """./tests/fixtures/tests_samples/COCO/000000039769.png""", """candidate_labels""": ["""cat""", """remote""", """couch"""], } ] return object_detector, examples def _lowercase ( self : Any , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Dict ) -> List[str]: """simple docstring""" __magic_name__ = object_detector(examples[0] , threshold=0.0 ) __magic_name__ = len(lowerCamelCase_ ) self.assertGreater(lowerCamelCase_ , 0 ) self.assertEqual( lowerCamelCase_ , [ { """score""": ANY(lowerCamelCase_ ), """label""": ANY(lowerCamelCase_ ), """box""": {"""xmin""": ANY(lowerCamelCase_ ), """ymin""": ANY(lowerCamelCase_ ), """xmax""": ANY(lowerCamelCase_ ), """ymax""": ANY(lowerCamelCase_ )}, } for i in range(lowerCamelCase_ ) ] , ) @require_tf @unittest.skip("""Zero Shot Object Detection not implemented in TF""" ) def _lowercase ( self : Optional[Any] ) -> List[Any]: """simple docstring""" pass @require_torch def _lowercase ( self : Optional[Any] ) -> List[str]: """simple docstring""" __magic_name__ = pipeline( """zero-shot-object-detection""" , model="""hf-internal-testing/tiny-random-owlvit-object-detection""" ) __magic_name__ = object_detector( """./tests/fixtures/tests_samples/COCO/000000039769.png""" , candidate_labels=["""cat""", """remote""", """couch"""] , threshold=0.64 , ) self.assertEqual( nested_simplify(lowerCamelCase_ , decimals=4 ) , [ {"""score""": 0.7235, """label""": """cat""", """box""": {"""xmin""": 204, """ymin""": 167, """xmax""": 232, """ymax""": 190}}, {"""score""": 0.7218, """label""": """remote""", """box""": {"""xmin""": 204, """ymin""": 167, """xmax""": 232, """ymax""": 190}}, {"""score""": 0.7184, """label""": """couch""", """box""": {"""xmin""": 204, """ymin""": 167, """xmax""": 232, """ymax""": 190}}, {"""score""": 0.6748, """label""": """remote""", """box""": {"""xmin""": 571, """ymin""": 83, """xmax""": 598, """ymax""": 103}}, {"""score""": 0.6656, """label""": """cat""", """box""": {"""xmin""": 571, """ymin""": 83, """xmax""": 598, """ymax""": 103}}, {"""score""": 0.6614, """label""": """couch""", """box""": {"""xmin""": 571, """ymin""": 83, """xmax""": 598, """ymax""": 103}}, {"""score""": 0.6456, """label""": """remote""", """box""": {"""xmin""": 494, """ymin""": 105, """xmax""": 521, """ymax""": 127}}, {"""score""": 0.642, """label""": """remote""", """box""": {"""xmin""": 67, """ymin""": 274, """xmax""": 93, """ymax""": 297}}, {"""score""": 0.6419, """label""": """cat""", """box""": {"""xmin""": 494, """ymin""": 105, """xmax""": 521, """ymax""": 127}}, ] , ) __magic_name__ = object_detector( [ { """image""": """./tests/fixtures/tests_samples/COCO/000000039769.png""", """candidate_labels""": ["""cat""", """remote""", """couch"""], } ] , threshold=0.64 , ) self.assertEqual( nested_simplify(lowerCamelCase_ , decimals=4 ) , [ [ {"""score""": 0.7235, """label""": """cat""", """box""": {"""xmin""": 204, """ymin""": 167, """xmax""": 232, """ymax""": 190}}, {"""score""": 0.7218, """label""": """remote""", """box""": {"""xmin""": 204, """ymin""": 167, """xmax""": 232, """ymax""": 190}}, {"""score""": 0.7184, """label""": """couch""", """box""": {"""xmin""": 204, """ymin""": 167, """xmax""": 232, """ymax""": 190}}, {"""score""": 0.6748, """label""": """remote""", """box""": {"""xmin""": 571, """ymin""": 83, """xmax""": 598, """ymax""": 103}}, {"""score""": 0.6656, """label""": """cat""", """box""": {"""xmin""": 571, """ymin""": 83, """xmax""": 598, """ymax""": 103}}, {"""score""": 0.6614, """label""": """couch""", """box""": {"""xmin""": 571, """ymin""": 83, """xmax""": 598, """ymax""": 103}}, {"""score""": 0.6456, """label""": """remote""", """box""": {"""xmin""": 494, """ymin""": 105, """xmax""": 521, """ymax""": 127}}, {"""score""": 0.642, """label""": """remote""", """box""": {"""xmin""": 67, """ymin""": 274, """xmax""": 93, """ymax""": 297}}, {"""score""": 0.6419, """label""": """cat""", """box""": {"""xmin""": 494, """ymin""": 105, """xmax""": 521, """ymax""": 127}}, ] ] , ) @require_torch @slow def _lowercase ( self : Any ) -> Tuple: """simple docstring""" __magic_name__ = pipeline("""zero-shot-object-detection""" ) __magic_name__ = object_detector( """http://images.cocodataset.org/val2017/000000039769.jpg""" , candidate_labels=["""cat""", """remote""", """couch"""] , ) self.assertEqual( nested_simplify(lowerCamelCase_ , decimals=4 ) , [ {"""score""": 0.2868, """label""": """cat""", """box""": {"""xmin""": 324, """ymin""": 20, """xmax""": 640, """ymax""": 373}}, {"""score""": 0.277, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 72, """xmax""": 177, """ymax""": 115}}, {"""score""": 0.2537, """label""": """cat""", """box""": {"""xmin""": 1, """ymin""": 55, """xmax""": 315, """ymax""": 472}}, {"""score""": 0.1474, """label""": """remote""", """box""": {"""xmin""": 335, """ymin""": 74, """xmax""": 371, """ymax""": 187}}, {"""score""": 0.1208, """label""": """couch""", """box""": {"""xmin""": 4, """ymin""": 0, """xmax""": 642, """ymax""": 476}}, ] , ) __magic_name__ = object_detector( [ { """image""": """http://images.cocodataset.org/val2017/000000039769.jpg""", """candidate_labels""": ["""cat""", """remote""", """couch"""], }, { """image""": """http://images.cocodataset.org/val2017/000000039769.jpg""", """candidate_labels""": ["""cat""", """remote""", """couch"""], }, ] , ) self.assertEqual( nested_simplify(lowerCamelCase_ , decimals=4 ) , [ [ {"""score""": 0.2868, """label""": """cat""", """box""": {"""xmin""": 324, """ymin""": 20, """xmax""": 640, """ymax""": 373}}, {"""score""": 0.277, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 72, """xmax""": 177, """ymax""": 115}}, {"""score""": 0.2537, """label""": """cat""", """box""": {"""xmin""": 1, """ymin""": 55, """xmax""": 315, """ymax""": 472}}, {"""score""": 0.1474, """label""": """remote""", """box""": {"""xmin""": 335, """ymin""": 74, """xmax""": 371, """ymax""": 187}}, {"""score""": 0.1208, """label""": """couch""", """box""": {"""xmin""": 4, """ymin""": 0, """xmax""": 642, """ymax""": 476}}, ], [ {"""score""": 0.2868, """label""": """cat""", """box""": {"""xmin""": 324, """ymin""": 20, """xmax""": 640, """ymax""": 373}}, {"""score""": 0.277, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 72, """xmax""": 177, """ymax""": 115}}, {"""score""": 0.2537, """label""": """cat""", """box""": {"""xmin""": 1, """ymin""": 55, """xmax""": 315, """ymax""": 472}}, {"""score""": 0.1474, """label""": """remote""", """box""": {"""xmin""": 335, """ymin""": 74, """xmax""": 371, """ymax""": 187}}, {"""score""": 0.1208, """label""": """couch""", """box""": {"""xmin""": 4, """ymin""": 0, """xmax""": 642, """ymax""": 476}}, ], ] , ) @require_tf @unittest.skip("""Zero Shot Object Detection not implemented in TF""" ) def _lowercase ( self : Optional[int] ) -> str: """simple docstring""" pass @require_torch @slow def _lowercase ( self : Optional[int] ) -> Union[str, Any]: """simple docstring""" __magic_name__ = 0.2 __magic_name__ = pipeline("""zero-shot-object-detection""" ) __magic_name__ = object_detector( """http://images.cocodataset.org/val2017/000000039769.jpg""" , candidate_labels=["""cat""", """remote""", """couch"""] , threshold=lowerCamelCase_ , ) self.assertEqual( nested_simplify(lowerCamelCase_ , decimals=4 ) , [ {"""score""": 0.2868, """label""": """cat""", """box""": {"""xmin""": 324, """ymin""": 20, """xmax""": 640, """ymax""": 373}}, {"""score""": 0.277, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 72, """xmax""": 177, """ymax""": 115}}, {"""score""": 0.2537, """label""": """cat""", """box""": {"""xmin""": 1, """ymin""": 55, """xmax""": 315, """ymax""": 472}}, ] , ) @require_torch @slow def _lowercase ( self : Dict ) -> str: """simple docstring""" __magic_name__ = 2 __magic_name__ = pipeline("""zero-shot-object-detection""" ) __magic_name__ = object_detector( """http://images.cocodataset.org/val2017/000000039769.jpg""" , candidate_labels=["""cat""", """remote""", """couch"""] , top_k=lowerCamelCase_ , ) self.assertEqual( nested_simplify(lowerCamelCase_ , decimals=4 ) , [ {"""score""": 0.2868, """label""": """cat""", """box""": {"""xmin""": 324, """ymin""": 20, """xmax""": 640, """ymax""": 373}}, {"""score""": 0.277, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 72, """xmax""": 177, """ymax""": 115}}, ] , )
712
from typing import Dict from .base import GenericTensor, Pipeline class UpperCAmelCase_ ( _A ): '''simple docstring''' def _lowercase ( self : List[Any] , UpperCamelCase__ : List[Any]=None , UpperCamelCase__ : Union[str, Any]=None , UpperCamelCase__ : Any=None , **UpperCamelCase__ : Dict ) -> str: """simple docstring""" if tokenize_kwargs is None: __magic_name__ = {} if truncation is not None: if "truncation" in tokenize_kwargs: raise ValueError( """truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)""" ) __magic_name__ = truncation __magic_name__ = tokenize_kwargs __magic_name__ = {} if return_tensors is not None: __magic_name__ = return_tensors return preprocess_params, {}, postprocess_params def _lowercase ( self : int , UpperCamelCase__ : int , **UpperCamelCase__ : int ) -> Dict[str, GenericTensor]: """simple docstring""" __magic_name__ = self.framework __magic_name__ = self.tokenizer(UpperCamelCase__ , return_tensors=UpperCamelCase__ , **UpperCamelCase__ ) return model_inputs def _lowercase ( self : str , UpperCamelCase__ : Dict ) -> str: """simple docstring""" __magic_name__ = self.model(**UpperCamelCase__ ) return model_outputs def _lowercase ( self : Union[str, Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : List[str]=False ) -> List[str]: """simple docstring""" if return_tensors: return model_outputs[0] if self.framework == "pt": return model_outputs[0].tolist() elif self.framework == "tf": return model_outputs[0].numpy().tolist() def __call__( self : List[str] , *UpperCamelCase__ : List[Any] , **UpperCamelCase__ : List[Any] ) -> Dict: """simple docstring""" return super().__call__(*UpperCamelCase__ , **UpperCamelCase__ )
76
0
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available __lowerCAmelCase : Tuple = { 'configuration_xmod': [ 'XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP', 'XmodConfig', 'XmodOnnxConfig', ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCAmelCase : str = [ 'XMOD_PRETRAINED_MODEL_ARCHIVE_LIST', 'XmodForCausalLM', 'XmodForMaskedLM', 'XmodForMultipleChoice', 'XmodForQuestionAnswering', 'XmodForSequenceClassification', 'XmodForTokenClassification', 'XmodModel', 'XmodPreTrainedModel', ] if TYPE_CHECKING: from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xmod import ( XMOD_PRETRAINED_MODEL_ARCHIVE_LIST, XmodForCausalLM, XmodForMaskedLM, XmodForMultipleChoice, XmodForQuestionAnswering, XmodForSequenceClassification, XmodForTokenClassification, XmodModel, XmodPreTrainedModel, ) else: import sys __lowerCAmelCase : str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
713
import argparse import math import os from copy import deepcopy import torch from audio_diffusion.models import DiffusionAttnUnetaD from diffusion import sampling from torch import nn from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel __lowerCAmelCase : str = { 'gwf-440k': { 'url': 'https://model-server.zqevans2.workers.dev/gwf-440k.ckpt', 'sample_rate': 48000, 'sample_size': 65536, }, 'jmann-small-190k': { 'url': 'https://model-server.zqevans2.workers.dev/jmann-small-190k.ckpt', 'sample_rate': 48000, 'sample_size': 65536, }, 'jmann-large-580k': { 'url': 'https://model-server.zqevans2.workers.dev/jmann-large-580k.ckpt', 'sample_rate': 48000, 'sample_size': 131072, }, 'maestro-uncond-150k': { 'url': 'https://model-server.zqevans2.workers.dev/maestro-uncond-150k.ckpt', 'sample_rate': 16000, 'sample_size': 65536, }, 'unlocked-uncond-250k': { 'url': 'https://model-server.zqevans2.workers.dev/unlocked-uncond-250k.ckpt', 'sample_rate': 16000, 'sample_size': 65536, }, 'honk-140k': { 'url': 'https://model-server.zqevans2.workers.dev/honk-140k.ckpt', 'sample_rate': 16000, 'sample_size': 65536, }, } def a__ ( A_, A_ ): '''simple docstring''' return torch.atana(A_, A_ ) / math.pi * 2 def a__ ( A_ ): '''simple docstring''' __magic_name__ = torch.sin(t * math.pi / 2 ) ** 2 __magic_name__ = (1 - sigma**2) ** 0.5 return alpha_sigma_to_t(A_, A_ ) class UpperCAmelCase_ ( _A ): '''simple docstring''' pass class UpperCAmelCase_ ( nn.Module ): '''simple docstring''' def __init__( self : Tuple , UpperCamelCase__ : str ) -> Optional[Any]: """simple docstring""" super().__init__() __magic_name__ = DiffusionAttnUnetaD(UpperCamelCase__ , n_attn_layers=4 ) __magic_name__ = deepcopy(self.diffusion ) __magic_name__ = torch.quasirandom.SobolEngine(1 , scramble=UpperCamelCase__ ) def a__ ( A_ ): '''simple docstring''' __magic_name__ = MODELS_MAP[model_name]["""url"""] os.system(f'''wget {url} ./''' ) return f'''./{model_name}.ckpt''' __lowerCAmelCase : Optional[int] = { '1': 'resnets.0', '2': 'attentions.0', '3': 'resnets.1', '4': 'attentions.1', '5': 'resnets.2', '6': 'attentions.2', } __lowerCAmelCase : Optional[Any] = { '8': 'resnets.0', '9': 'attentions.0', '10': 'resnets.1', '11': 'attentions.1', '12': 'resnets.2', '13': 'attentions.2', } __lowerCAmelCase : Union[str, Any] = { '1': 'resnets.0', '2': 'attentions.0', '3': 'resnets.1', '4': 'attentions.1', '5': 'resnets.2', '6': 'attentions.2', '8': 'resnets.3', '9': 'attentions.3', '10': 'resnets.4', '11': 'attentions.4', '12': 'resnets.5', '13': 'attentions.5', } __lowerCAmelCase : int = { '0': 'resnets.0', '1': 'resnets.1', '2': 'resnets.2', '4': 'resnets.0', '5': 'resnets.1', '6': 'resnets.2', } __lowerCAmelCase : List[str] = { 'skip': 'conv_skip', 'main.0': 'conv_1', 'main.1': 'group_norm_1', 'main.3': 'conv_2', 'main.4': 'group_norm_2', } __lowerCAmelCase : int = { 'norm': 'group_norm', 'qkv_proj': ['query', 'key', 'value'], 'out_proj': ['proj_attn'], } def a__ ( A_ ): '''simple docstring''' if name.startswith("""skip""" ): return name.replace("""skip""", RES_CONV_MAP["""skip"""] ) # name has to be of format main.{digit} if not name.startswith("""main.""" ): raise ValueError(f'''ResConvBlock error with {name}''' ) return name.replace(name[:6], RES_CONV_MAP[name[:6]] ) def a__ ( A_ ): '''simple docstring''' for key, value in ATTN_MAP.items(): if name.startswith(A_ ) and not isinstance(A_, A_ ): return name.replace(A_, A_ ) elif name.startswith(A_ ): return [name.replace(A_, A_ ) for v in value] raise ValueError(f'''Attn error with {name}''' ) def a__ ( A_, A_=13 ): '''simple docstring''' __magic_name__ = input_string if string.split(""".""" )[0] == "timestep_embed": return string.replace("""timestep_embed""", """time_proj""" ) __magic_name__ = 0 if string.startswith("""net.3.""" ): depth += 1 __magic_name__ = string[6:] elif string.startswith("""net.""" ): __magic_name__ = string[4:] while string.startswith("""main.7.""" ): depth += 1 __magic_name__ = string[7:] if string.startswith("""main.""" ): __magic_name__ = string[5:] # mid block if string[:2].isdigit(): __magic_name__ = string[:2] __magic_name__ = string[2:] else: __magic_name__ = string[0] __magic_name__ = string[1:] if depth == max_depth: __magic_name__ = MID_NUM_TO_LAYER[layer_num] __magic_name__ = """mid_block""" elif depth > 0 and int(A_ ) < 7: __magic_name__ = DOWN_NUM_TO_LAYER[layer_num] __magic_name__ = f'''down_blocks.{depth}''' elif depth > 0 and int(A_ ) > 7: __magic_name__ = UP_NUM_TO_LAYER[layer_num] __magic_name__ = f'''up_blocks.{max_depth - depth - 1}''' elif depth == 0: __magic_name__ = DEPTH_0_TO_LAYER[layer_num] __magic_name__ = f'''up_blocks.{max_depth - 1}''' if int(A_ ) > 3 else """down_blocks.0""" if not string_left.startswith(""".""" ): raise ValueError(f'''Naming error with {input_string} and string_left: {string_left}.''' ) __magic_name__ = string_left[1:] if "resnets" in new_layer: __magic_name__ = convert_resconv_naming(A_ ) elif "attentions" in new_layer: __magic_name__ = convert_attn_naming(A_ ) __magic_name__ = new_string_left if not isinstance(A_, A_ ): __magic_name__ = prefix + """.""" + new_layer + """.""" + string_left else: __magic_name__ = [prefix + """.""" + new_layer + """.""" + s for s in string_left] return new_string def a__ ( A_ ): '''simple docstring''' __magic_name__ = {} for k, v in state_dict.items(): if k.endswith("""kernel""" ): # up- and downsample layers, don't have trainable weights continue __magic_name__ = rename(A_ ) # check if we need to transform from Conv => Linear for attention if isinstance(A_, A_ ): __magic_name__ = transform_conv_attns(A_, A_, A_ ) else: __magic_name__ = v return new_state_dict def a__ ( A_, A_, A_ ): '''simple docstring''' if len(A_ ) == 1: if len(v.shape ) == 3: # weight __magic_name__ = v[:, :, 0] else: # bias __magic_name__ = v else: # qkv matrices __magic_name__ = v.shape[0] __magic_name__ = trippled_shape // 3 for i in range(3 ): if len(v.shape ) == 3: __magic_name__ = v[i * single_shape : (i + 1) * single_shape, :, 0] else: __magic_name__ = v[i * single_shape : (i + 1) * single_shape] return new_state_dict def a__ ( A_ ): '''simple docstring''' __magic_name__ = torch.device("""cuda""" if torch.cuda.is_available() else """cpu""" ) __magic_name__ = args.model_path.split("""/""" )[-1].split(""".""" )[0] if not os.path.isfile(args.model_path ): assert ( model_name == args.model_path ), f'''Make sure to provide one of the official model names {MODELS_MAP.keys()}''' __magic_name__ = download(A_ ) __magic_name__ = MODELS_MAP[model_name]["""sample_rate"""] __magic_name__ = MODELS_MAP[model_name]["""sample_size"""] __magic_name__ = Object() __magic_name__ = sample_size __magic_name__ = sample_rate __magic_name__ = 0 __magic_name__ = UNetaDModel(sample_size=A_, sample_rate=A_ ) __magic_name__ = diffusers_model.state_dict() __magic_name__ = DiffusionUncond(A_ ) orig_model.load_state_dict(torch.load(args.model_path, map_location=A_ )["""state_dict"""] ) __magic_name__ = orig_model.diffusion_ema.eval() __magic_name__ = orig_model.state_dict() __magic_name__ = rename_orig_weights(A_ ) __magic_name__ = set(renamed_state_dict.keys() ) - set(diffusers_state_dict.keys() ) __magic_name__ = set(diffusers_state_dict.keys() ) - set(renamed_state_dict.keys() ) assert len(A_ ) == 0, f'''Problem with {renamed_minus_diffusers}''' assert all(k.endswith("""kernel""" ) for k in list(A_ ) ), f'''Problem with {diffusers_minus_renamed}''' for key, value in renamed_state_dict.items(): assert ( diffusers_state_dict[key].squeeze().shape == value.squeeze().shape ), f'''Shape for {key} doesn\'t match. Diffusers: {diffusers_state_dict[key].shape} vs. {value.shape}''' if key == "time_proj.weight": __magic_name__ = value.squeeze() __magic_name__ = value diffusers_model.load_state_dict(A_ ) __magic_name__ = 100 __magic_name__ = 33 __magic_name__ = IPNDMScheduler(num_train_timesteps=A_ ) __magic_name__ = torch.manual_seed(A_ ) __magic_name__ = torch.randn([1, 2, config.sample_size], generator=A_ ).to(A_ ) __magic_name__ = torch.linspace(1, 0, steps + 1, device=A_ )[:-1] __magic_name__ = get_crash_schedule(A_ ) __magic_name__ = DanceDiffusionPipeline(unet=A_, scheduler=A_ ) __magic_name__ = torch.manual_seed(33 ) __magic_name__ = pipe(num_inference_steps=A_, generator=A_ ).audios __magic_name__ = sampling.iplms_sample(A_, A_, A_, {} ) __magic_name__ = generated.clamp(-1, 1 ) __magic_name__ = (generated - audio).abs().sum() __magic_name__ = (generated - audio).abs().max() if args.save: pipe.save_pretrained(args.checkpoint_path ) print("""Diff sum""", A_ ) print("""Diff max""", A_ ) assert diff_max < 1e-3, f'''Diff max: {diff_max} is too much :-/''' print(f'''Conversion for {model_name} successful!''' ) if __name__ == "__main__": __lowerCAmelCase : Optional[int] = argparse.ArgumentParser() parser.add_argument('--model_path', default=None, type=str, required=True, help='Path to the model to convert.') parser.add_argument( '--save', default=True, type=bool, required=False, help='Whether to save the converted model or not.' ) parser.add_argument('--checkpoint_path', default=None, type=str, required=True, help='Path to the output model.') __lowerCAmelCase : Union[str, Any] = parser.parse_args() main(args)
76
0
import unittest from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class UpperCAmelCase_ : '''simple docstring''' @staticmethod def _lowercase ( *UpperCamelCase__ : Optional[Any] , **UpperCamelCase__ : Union[str, Any] ) -> Optional[int]: """simple docstring""" pass @is_pipeline_test @require_vision @require_torch class UpperCAmelCase_ ( unittest.TestCase ): '''simple docstring''' a__ = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING def _lowercase ( self : List[str] , UpperCamelCase__ : int , UpperCamelCase__ : Tuple , UpperCamelCase__ : Any ) -> List[str]: """simple docstring""" __magic_name__ = pipeline( """zero-shot-object-detection""" , model="""hf-internal-testing/tiny-random-owlvit-object-detection""" ) __magic_name__ = [ { "image": "./tests/fixtures/tests_samples/COCO/000000039769.png", "candidate_labels": ["cat", "remote", "couch"], } ] return object_detector, examples def _lowercase ( self : Optional[Any] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : List[Any] ) -> Optional[Any]: """simple docstring""" __magic_name__ = object_detector(examples[0] , threshold=0.0 ) __magic_name__ = len(UpperCamelCase__ ) self.assertGreater(UpperCamelCase__ , 0 ) self.assertEqual( UpperCamelCase__ , [ { """score""": ANY(UpperCamelCase__ ), """label""": ANY(UpperCamelCase__ ), """box""": {"""xmin""": ANY(UpperCamelCase__ ), """ymin""": ANY(UpperCamelCase__ ), """xmax""": ANY(UpperCamelCase__ ), """ymax""": ANY(UpperCamelCase__ )}, } for i in range(UpperCamelCase__ ) ] , ) @require_tf @unittest.skip("""Zero Shot Object Detection not implemented in TF""" ) def _lowercase ( self : List[str] ) -> Union[str, Any]: """simple docstring""" pass @require_torch def _lowercase ( self : int ) -> Optional[int]: """simple docstring""" __magic_name__ = pipeline( """zero-shot-object-detection""" , model="""hf-internal-testing/tiny-random-owlvit-object-detection""" ) __magic_name__ = object_detector( """./tests/fixtures/tests_samples/COCO/000000039769.png""" , candidate_labels=["""cat""", """remote""", """couch"""] , threshold=0.64 , ) self.assertEqual( nested_simplify(UpperCamelCase__ , decimals=4 ) , [ {"""score""": 0.7235, """label""": """cat""", """box""": {"""xmin""": 204, """ymin""": 167, """xmax""": 232, """ymax""": 190}}, {"""score""": 0.7218, """label""": """remote""", """box""": {"""xmin""": 204, """ymin""": 167, """xmax""": 232, """ymax""": 190}}, {"""score""": 0.7184, """label""": """couch""", """box""": {"""xmin""": 204, """ymin""": 167, """xmax""": 232, """ymax""": 190}}, {"""score""": 0.6748, """label""": """remote""", """box""": {"""xmin""": 571, """ymin""": 83, """xmax""": 598, """ymax""": 103}}, {"""score""": 0.6656, """label""": """cat""", """box""": {"""xmin""": 571, """ymin""": 83, """xmax""": 598, """ymax""": 103}}, {"""score""": 0.6614, """label""": """couch""", """box""": {"""xmin""": 571, """ymin""": 83, """xmax""": 598, """ymax""": 103}}, {"""score""": 0.6456, """label""": """remote""", """box""": {"""xmin""": 494, """ymin""": 105, """xmax""": 521, """ymax""": 127}}, {"""score""": 0.642, """label""": """remote""", """box""": {"""xmin""": 67, """ymin""": 274, """xmax""": 93, """ymax""": 297}}, {"""score""": 0.6419, """label""": """cat""", """box""": {"""xmin""": 494, """ymin""": 105, """xmax""": 521, """ymax""": 127}}, ] , ) __magic_name__ = object_detector( [ { """image""": """./tests/fixtures/tests_samples/COCO/000000039769.png""", """candidate_labels""": ["""cat""", """remote""", """couch"""], } ] , threshold=0.64 , ) self.assertEqual( nested_simplify(UpperCamelCase__ , decimals=4 ) , [ [ {"""score""": 0.7235, """label""": """cat""", """box""": {"""xmin""": 204, """ymin""": 167, """xmax""": 232, """ymax""": 190}}, {"""score""": 0.7218, """label""": """remote""", """box""": {"""xmin""": 204, """ymin""": 167, """xmax""": 232, """ymax""": 190}}, {"""score""": 0.7184, """label""": """couch""", """box""": {"""xmin""": 204, """ymin""": 167, """xmax""": 232, """ymax""": 190}}, {"""score""": 0.6748, """label""": """remote""", """box""": {"""xmin""": 571, """ymin""": 83, """xmax""": 598, """ymax""": 103}}, {"""score""": 0.6656, """label""": """cat""", """box""": {"""xmin""": 571, """ymin""": 83, """xmax""": 598, """ymax""": 103}}, {"""score""": 0.6614, """label""": """couch""", """box""": {"""xmin""": 571, """ymin""": 83, """xmax""": 598, """ymax""": 103}}, {"""score""": 0.6456, """label""": """remote""", """box""": {"""xmin""": 494, """ymin""": 105, """xmax""": 521, """ymax""": 127}}, {"""score""": 0.642, """label""": """remote""", """box""": {"""xmin""": 67, """ymin""": 274, """xmax""": 93, """ymax""": 297}}, {"""score""": 0.6419, """label""": """cat""", """box""": {"""xmin""": 494, """ymin""": 105, """xmax""": 521, """ymax""": 127}}, ] ] , ) @require_torch @slow def _lowercase ( self : int ) -> Tuple: """simple docstring""" __magic_name__ = pipeline("""zero-shot-object-detection""" ) __magic_name__ = object_detector( """http://images.cocodataset.org/val2017/000000039769.jpg""" , candidate_labels=["""cat""", """remote""", """couch"""] , ) self.assertEqual( nested_simplify(UpperCamelCase__ , decimals=4 ) , [ {"""score""": 0.2868, """label""": """cat""", """box""": {"""xmin""": 324, """ymin""": 20, """xmax""": 640, """ymax""": 373}}, {"""score""": 0.277, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 72, """xmax""": 177, """ymax""": 115}}, {"""score""": 0.2537, """label""": """cat""", """box""": {"""xmin""": 1, """ymin""": 55, """xmax""": 315, """ymax""": 472}}, {"""score""": 0.1474, """label""": """remote""", """box""": {"""xmin""": 335, """ymin""": 74, """xmax""": 371, """ymax""": 187}}, {"""score""": 0.1208, """label""": """couch""", """box""": {"""xmin""": 4, """ymin""": 0, """xmax""": 642, """ymax""": 476}}, ] , ) __magic_name__ = object_detector( [ { """image""": """http://images.cocodataset.org/val2017/000000039769.jpg""", """candidate_labels""": ["""cat""", """remote""", """couch"""], }, { """image""": """http://images.cocodataset.org/val2017/000000039769.jpg""", """candidate_labels""": ["""cat""", """remote""", """couch"""], }, ] , ) self.assertEqual( nested_simplify(UpperCamelCase__ , decimals=4 ) , [ [ {"""score""": 0.2868, """label""": """cat""", """box""": {"""xmin""": 324, """ymin""": 20, """xmax""": 640, """ymax""": 373}}, {"""score""": 0.277, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 72, """xmax""": 177, """ymax""": 115}}, {"""score""": 0.2537, """label""": """cat""", """box""": {"""xmin""": 1, """ymin""": 55, """xmax""": 315, """ymax""": 472}}, {"""score""": 0.1474, """label""": """remote""", """box""": {"""xmin""": 335, """ymin""": 74, """xmax""": 371, """ymax""": 187}}, {"""score""": 0.1208, """label""": """couch""", """box""": {"""xmin""": 4, """ymin""": 0, """xmax""": 642, """ymax""": 476}}, ], [ {"""score""": 0.2868, """label""": """cat""", """box""": {"""xmin""": 324, """ymin""": 20, """xmax""": 640, """ymax""": 373}}, {"""score""": 0.277, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 72, """xmax""": 177, """ymax""": 115}}, {"""score""": 0.2537, """label""": """cat""", """box""": {"""xmin""": 1, """ymin""": 55, """xmax""": 315, """ymax""": 472}}, {"""score""": 0.1474, """label""": """remote""", """box""": {"""xmin""": 335, """ymin""": 74, """xmax""": 371, """ymax""": 187}}, {"""score""": 0.1208, """label""": """couch""", """box""": {"""xmin""": 4, """ymin""": 0, """xmax""": 642, """ymax""": 476}}, ], ] , ) @require_tf @unittest.skip("""Zero Shot Object Detection not implemented in TF""" ) def _lowercase ( self : Any ) -> str: """simple docstring""" pass @require_torch @slow def _lowercase ( self : Union[str, Any] ) -> List[str]: """simple docstring""" __magic_name__ = 0.2 __magic_name__ = pipeline("""zero-shot-object-detection""" ) __magic_name__ = object_detector( """http://images.cocodataset.org/val2017/000000039769.jpg""" , candidate_labels=["""cat""", """remote""", """couch"""] , threshold=UpperCamelCase__ , ) self.assertEqual( nested_simplify(UpperCamelCase__ , decimals=4 ) , [ {"""score""": 0.2868, """label""": """cat""", """box""": {"""xmin""": 324, """ymin""": 20, """xmax""": 640, """ymax""": 373}}, {"""score""": 0.277, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 72, """xmax""": 177, """ymax""": 115}}, {"""score""": 0.2537, """label""": """cat""", """box""": {"""xmin""": 1, """ymin""": 55, """xmax""": 315, """ymax""": 472}}, ] , ) @require_torch @slow def _lowercase ( self : Any ) -> Union[str, Any]: """simple docstring""" __magic_name__ = 2 __magic_name__ = pipeline("""zero-shot-object-detection""" ) __magic_name__ = object_detector( """http://images.cocodataset.org/val2017/000000039769.jpg""" , candidate_labels=["""cat""", """remote""", """couch"""] , top_k=UpperCamelCase__ , ) self.assertEqual( nested_simplify(UpperCamelCase__ , decimals=4 ) , [ {"""score""": 0.2868, """label""": """cat""", """box""": {"""xmin""": 324, """ymin""": 20, """xmax""": 640, """ymax""": 373}}, {"""score""": 0.277, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 72, """xmax""": 177, """ymax""": 115}}, ] , )
714
from ...configuration_utils import PretrainedConfig from ...utils import logging __lowerCAmelCase : Tuple = logging.get_logger(__name__) __lowerCAmelCase : Tuple = { 'SCUT-DLVCLab/lilt-roberta-en-base': ( 'https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json' ), } class UpperCAmelCase_ ( _A ): '''simple docstring''' a__ = """lilt""" def __init__( self : Dict , UpperCamelCase__ : List[str]=3_0522 , UpperCamelCase__ : Optional[Any]=768 , UpperCamelCase__ : Dict=12 , UpperCamelCase__ : Optional[Any]=12 , UpperCamelCase__ : Dict=3072 , UpperCamelCase__ : Tuple="gelu" , UpperCamelCase__ : int=0.1 , UpperCamelCase__ : Dict=0.1 , UpperCamelCase__ : Union[str, Any]=512 , UpperCamelCase__ : Dict=2 , UpperCamelCase__ : Dict=0.02 , UpperCamelCase__ : Any=1E-12 , UpperCamelCase__ : Optional[int]=0 , UpperCamelCase__ : str="absolute" , UpperCamelCase__ : Any=None , UpperCamelCase__ : Dict=4 , UpperCamelCase__ : Tuple=1024 , **UpperCamelCase__ : Optional[int] , ) -> Dict: """simple docstring""" super().__init__(pad_token_id=UpperCamelCase__ , **UpperCamelCase__ ) __magic_name__ = vocab_size __magic_name__ = hidden_size __magic_name__ = num_hidden_layers __magic_name__ = num_attention_heads __magic_name__ = hidden_act __magic_name__ = intermediate_size __magic_name__ = hidden_dropout_prob __magic_name__ = attention_probs_dropout_prob __magic_name__ = max_position_embeddings __magic_name__ = type_vocab_size __magic_name__ = initializer_range __magic_name__ = layer_norm_eps __magic_name__ = position_embedding_type __magic_name__ = classifier_dropout __magic_name__ = channel_shrink_ratio __magic_name__ = max_ad_position_embeddings
76
0
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available, is_vision_available, ) __lowerCAmelCase : Union[str, Any] = {'configuration_deit': ['DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'DeiTConfig', 'DeiTOnnxConfig']} try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCAmelCase : Optional[int] = ['DeiTFeatureExtractor'] __lowerCAmelCase : Optional[Any] = ['DeiTImageProcessor'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCAmelCase : int = [ 'DEIT_PRETRAINED_MODEL_ARCHIVE_LIST', 'DeiTForImageClassification', 'DeiTForImageClassificationWithTeacher', 'DeiTForMaskedImageModeling', 'DeiTModel', 'DeiTPreTrainedModel', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCAmelCase : Optional[Any] = [ 'TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFDeiTForImageClassification', 'TFDeiTForImageClassificationWithTeacher', 'TFDeiTForMaskedImageModeling', 'TFDeiTModel', 'TFDeiTPreTrainedModel', ] if TYPE_CHECKING: from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_deit import DeiTFeatureExtractor from .image_processing_deit import DeiTImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_deit import ( DEIT_PRETRAINED_MODEL_ARCHIVE_LIST, DeiTForImageClassification, DeiTForImageClassificationWithTeacher, DeiTForMaskedImageModeling, DeiTModel, DeiTPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_deit import ( TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST, TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher, TFDeiTForMaskedImageModeling, TFDeiTModel, TFDeiTPreTrainedModel, ) else: import sys __lowerCAmelCase : List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
715
import json import os import tempfile from transformers.testing_utils import check_json_file_has_correct_format class UpperCAmelCase_ : '''simple docstring''' a__ = None def _lowercase ( self : Optional[int] ) -> str: """simple docstring""" __magic_name__ = self.feature_extraction_class(**self.feat_extract_dict ) __magic_name__ = json.loads(feat_extract.to_json_string() ) for key, value in self.feat_extract_dict.items(): self.assertEqual(obj[key] , UpperCamelCase__ ) def _lowercase ( self : Union[str, Any] ) -> str: """simple docstring""" __magic_name__ = self.feature_extraction_class(**self.feat_extract_dict ) with tempfile.TemporaryDirectory() as tmpdirname: __magic_name__ = os.path.join(UpperCamelCase__ , """feat_extract.json""" ) feat_extract_first.to_json_file(UpperCamelCase__ ) __magic_name__ = self.feature_extraction_class.from_json_file(UpperCamelCase__ ) self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() ) def _lowercase ( self : str ) -> str: """simple docstring""" __magic_name__ = self.feature_extraction_class(**self.feat_extract_dict ) with tempfile.TemporaryDirectory() as tmpdirname: __magic_name__ = feat_extract_first.save_pretrained(UpperCamelCase__ )[0] check_json_file_has_correct_format(UpperCamelCase__ ) __magic_name__ = self.feature_extraction_class.from_pretrained(UpperCamelCase__ ) self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() ) def _lowercase ( self : Optional[int] ) -> Tuple: """simple docstring""" __magic_name__ = self.feature_extraction_class() self.assertIsNotNone(UpperCamelCase__ )
76
0
import argparse from collections import defaultdict import yaml __lowerCAmelCase : List[Any] = 'docs/source/en/_toctree.yml' def a__ ( A_ ): '''simple docstring''' __magic_name__ = defaultdict(snake_case__ ) for doc in model_doc: counts[doc["local"]] += 1 __magic_name__ = [key for key, value in counts.items() if value > 1] __magic_name__ = [] for duplicate_key in duplicates: __magic_name__ = list({doc["""title"""] for doc in model_doc if doc["""local"""] == duplicate_key} ) if len(snake_case__ ) > 1: raise ValueError( f'''{duplicate_key} is present several times in the documentation table of content at ''' """`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the """ """others.""" ) # Only add this once new_doc.append({"""local""": duplicate_key, """title""": titles[0]} ) # Add none duplicate-keys new_doc.extend([doc for doc in model_doc if counts[doc["""local"""]] == 1] ) # Sort return sorted(snake_case__, key=lambda A_ : s["title"].lower() ) def a__ ( A_=False ): '''simple docstring''' with open(snake_case__, encoding="""utf-8""" ) as f: __magic_name__ = yaml.safe_load(f.read() ) # Get to the API doc __magic_name__ = 0 while content[api_idx]["title"] != "API": api_idx += 1 __magic_name__ = content[api_idx]["""sections"""] # Then to the model doc __magic_name__ = 0 while api_doc[model_idx]["title"] != "Models": model_idx += 1 __magic_name__ = api_doc[model_idx]["""sections"""] __magic_name__ = [(idx, section) for idx, section in enumerate(snake_case__ ) if """sections""" in section] __magic_name__ = False for idx, modality_doc in modalities_docs: __magic_name__ = modality_doc["""sections"""] __magic_name__ = clean_model_doc_toc(snake_case__ ) if old_modality_doc != new_modality_doc: __magic_name__ = True if overwrite: __magic_name__ = new_modality_doc if diff: if overwrite: __magic_name__ = model_doc __magic_name__ = api_doc with open(snake_case__, """w""", encoding="""utf-8""" ) as f: f.write(yaml.dump(snake_case__, allow_unicode=snake_case__ ) ) else: raise ValueError( """The model doc part of the table of content is not properly sorted, run `make style` to fix this.""" ) if __name__ == "__main__": __lowerCAmelCase : str = argparse.ArgumentParser() parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.') __lowerCAmelCase : Dict = parser.parse_args() check_model_doc(args.fix_and_overwrite)
716
from ..utils import DummyObject, requires_backends class UpperCAmelCase_ ( metaclass=_A ): '''simple docstring''' a__ = ["""note_seq"""] def __init__( self : Any , *UpperCamelCase__ : str , **UpperCamelCase__ : List[Any] ) -> Optional[int]: """simple docstring""" requires_backends(self , ["""note_seq"""] ) @classmethod def _lowercase ( cls : str , *UpperCamelCase__ : Dict , **UpperCamelCase__ : Tuple ) -> Dict: """simple docstring""" requires_backends(cls , ["""note_seq"""] ) @classmethod def _lowercase ( cls : List[str] , *UpperCamelCase__ : Optional[Any] , **UpperCamelCase__ : Tuple ) -> int: """simple docstring""" requires_backends(cls , ["""note_seq"""] )
76
0
import logging import os from dataclasses import dataclass, field from typing import Dict, Optional import numpy as np from utils_multiple_choice import MultipleChoiceDataset, Split, processors import transformers from transformers import ( AutoConfig, AutoModelForMultipleChoice, AutoTokenizer, DataCollatorWithPadding, EvalPrediction, HfArgumentParser, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import is_main_process __lowerCAmelCase : Union[str, Any] = logging.getLogger(__name__) def a__ ( A_, A_ ) -> Optional[int]: '''simple docstring''' return (preds == labels).mean() @dataclass class UpperCAmelCase_ : '''simple docstring''' a__ = field( metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} ) a__ = field( default=_A , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} ) a__ = field( default=_A , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} ) a__ = field( default=_A , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , ) @dataclass class UpperCAmelCase_ : '''simple docstring''' a__ = field(metadata={"""help""": """The name of the task to train on: """ + """, """.join(processors.keys() )} ) a__ = field(metadata={"""help""": """Should contain the data files for the task."""} ) a__ = field( default=1_28 , metadata={ """help""": ( """The maximum total input sequence length after tokenization. Sequences longer """ """than this will be truncated, sequences shorter will be padded.""" ) } , ) a__ = field( default=_A , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} ) def a__ ( ) -> Dict: '''simple docstring''' __magic_name__ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) __magic_name__ = parser.parse_args_into_dataclasses() if ( os.path.exists(training_args.output_dir ) and os.listdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( f'''Output directory ({training_args.output_dir}) already exists and is not empty. Use''' """ --overwrite_output_dir to overcome.""" ) # Setup logging logging.basicConfig( format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN, ) logger.warning( """Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s""", training_args.local_rank, training_args.device, training_args.n_gpu, bool(training_args.local_rank != -1 ), training_args.fpaa, ) # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() logger.info("""Training/evaluation parameters %s""", A_ ) # Set seed set_seed(training_args.seed ) try: __magic_name__ = processors[data_args.task_name]() __magic_name__ = processor.get_labels() __magic_name__ = len(A_ ) except KeyError: raise ValueError("""Task not found: %s""" % (data_args.task_name) ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. __magic_name__ = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path, num_labels=A_, finetuning_task=data_args.task_name, cache_dir=model_args.cache_dir, ) __magic_name__ = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, ) __magic_name__ = AutoModelForMultipleChoice.from_pretrained( model_args.model_name_or_path, from_tf=bool(""".ckpt""" in model_args.model_name_or_path ), config=A_, cache_dir=model_args.cache_dir, ) # Get datasets __magic_name__ = ( MultipleChoiceDataset( data_dir=data_args.data_dir, tokenizer=A_, task=data_args.task_name, max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache, mode=Split.train, ) if training_args.do_train else None ) __magic_name__ = ( MultipleChoiceDataset( data_dir=data_args.data_dir, tokenizer=A_, task=data_args.task_name, max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache, mode=Split.dev, ) if training_args.do_eval else None ) def compute_metrics(A_ ) -> Dict: __magic_name__ = np.argmax(p.predictions, axis=1 ) return {"acc": simple_accuracy(A_, p.label_ids )} # Data collator __magic_name__ = DataCollatorWithPadding(A_, pad_to_multiple_of=8 ) if training_args.fpaa else None # Initialize our Trainer __magic_name__ = Trainer( model=A_, args=A_, train_dataset=A_, eval_dataset=A_, compute_metrics=A_, data_collator=A_, ) # Training if training_args.do_train: trainer.train( model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None ) trainer.save_model() # For convenience, we also re-save the tokenizer to the same directory, # so that you can share your model easily on huggingface.co/models =) if trainer.is_world_master(): tokenizer.save_pretrained(training_args.output_dir ) # Evaluation __magic_name__ = {} if training_args.do_eval: logger.info("""*** Evaluate ***""" ) __magic_name__ = trainer.evaluate() __magic_name__ = os.path.join(training_args.output_dir, """eval_results.txt""" ) if trainer.is_world_master(): with open(A_, """w""" ) as writer: logger.info("""***** Eval results *****""" ) for key, value in result.items(): logger.info(""" %s = %s""", A_, A_ ) writer.write("""%s = %s\n""" % (key, value) ) results.update(A_ ) return results def a__ ( A_ ) -> List[Any]: '''simple docstring''' main() if __name__ == "__main__": main()
717
def a__ ( A_ ): '''simple docstring''' return " ".join( """""".join(word[::-1] ) if len(A_ ) > 4 else word for word in sentence.split() ) if __name__ == "__main__": import doctest doctest.testmod() print(reverse_long_words('Hey wollef sroirraw'))
76
0
from .testing import ( are_the_same_tensors, execute_subprocess_async, require_bnb, require_cpu, require_cuda, require_huggingface_suite, require_mps, require_multi_gpu, require_multi_xpu, require_safetensors, require_single_gpu, require_single_xpu, require_torch_min_version, require_tpu, require_xpu, skip, slow, ) from .training import RegressionDataset, RegressionModel, RegressionModelaXPU from .scripts import test_script, test_sync, test_ops # isort: skip
718
import os import unittest from transformers import FunnelTokenizer, FunnelTokenizerFast from transformers.models.funnel.tokenization_funnel import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class UpperCAmelCase_ ( _A , unittest.TestCase ): '''simple docstring''' a__ = FunnelTokenizer a__ = FunnelTokenizerFast a__ = True a__ = True def _lowercase ( self : List[Any] ) -> str: """simple docstring""" super().setUp() __magic_name__ = [ """<unk>""", """<cls>""", """<sep>""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing""", """,""", """low""", """lowest""", ] __magic_name__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] ) with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer: vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) ) def _lowercase ( self : Dict , **UpperCamelCase__ : Tuple ) -> Union[str, Any]: """simple docstring""" return FunnelTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase__ ) def _lowercase ( self : str , **UpperCamelCase__ : str ) -> List[str]: """simple docstring""" return FunnelTokenizerFast.from_pretrained(self.tmpdirname , **UpperCamelCase__ ) def _lowercase ( self : List[str] , UpperCamelCase__ : str ) -> List[Any]: """simple docstring""" __magic_name__ = """UNwant\u00E9d,running""" __magic_name__ = """unwanted, running""" return input_text, output_text def _lowercase ( self : Union[str, Any] ) -> Tuple: """simple docstring""" __magic_name__ = self.tokenizer_class(self.vocab_file ) __magic_name__ = tokenizer.tokenize("""UNwant\u00E9d,running""" ) self.assertListEqual(UpperCamelCase__ , ["""un""", """##want""", """##ed""", """,""", """runn""", """##ing"""] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase__ ) , [7, 4, 5, 10, 8, 9] ) def _lowercase ( self : str ) -> List[Any]: """simple docstring""" __magic_name__ = self.get_tokenizers(do_lower_case=UpperCamelCase__ ) for tokenizer in tokenizers: __magic_name__ = tokenizer("""UNwant\u00E9d,running""" ) __magic_name__ = len(inputs["""input_ids"""] ) - 1 self.assertListEqual(inputs["""token_type_ids"""] , [2] + [0] * sentence_len ) __magic_name__ = tokenizer("""UNwant\u00E9d,running""" , """UNwant\u00E9d,running""" ) self.assertListEqual(inputs["""token_type_ids"""] , [2] + [0] * sentence_len + [1] * sentence_len )
76
0
import unittest from transformers import PegasusConfig, PegasusTokenizer, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_configuration_common import ConfigTester from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor if is_flax_available(): import os # The slow tests are often failing with OOM error on GPU # This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed # but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html __lowerCAmelCase : str = 'platform' import jax import jax.numpy as jnp import numpy as np from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel @require_flax class UpperCAmelCase_ : '''simple docstring''' a__ = PegasusConfig a__ = {} a__ = """gelu""" def __init__( self : Dict , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : str=13 , UpperCamelCase__ : Dict=7 , UpperCamelCase__ : Any=True , UpperCamelCase__ : int=False , UpperCamelCase__ : int=99 , UpperCamelCase__ : int=32 , UpperCamelCase__ : Union[str, Any]=5 , UpperCamelCase__ : Optional[Any]=4 , UpperCamelCase__ : int=37 , UpperCamelCase__ : Optional[int]=0.1 , UpperCamelCase__ : Dict=0.1 , UpperCamelCase__ : Optional[Any]=20 , UpperCamelCase__ : Tuple=2 , UpperCamelCase__ : int=1 , UpperCamelCase__ : List[Any]=0 , ) -> List[str]: __magic_name__ = parent __magic_name__ = batch_size __magic_name__ = seq_length __magic_name__ = is_training __magic_name__ = use_labels __magic_name__ = vocab_size __magic_name__ = hidden_size __magic_name__ = num_hidden_layers __magic_name__ = num_attention_heads __magic_name__ = intermediate_size __magic_name__ = hidden_dropout_prob __magic_name__ = attention_probs_dropout_prob __magic_name__ = max_position_embeddings __magic_name__ = eos_token_id __magic_name__ = pad_token_id __magic_name__ = bos_token_id def _lowercase ( self : str ) -> Tuple: __magic_name__ = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ).clip(3 , self.vocab_size ) __magic_name__ = np.expand_dims(np.array([self.eos_token_id] * self.batch_size ) , 1 ) __magic_name__ = np.concatenate([input_ids, eos_tensor] , axis=1 ) __magic_name__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __magic_name__ = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , ) __magic_name__ = prepare_pegasus_inputs_dict(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) return config, inputs_dict def _lowercase ( self : int , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : str ) -> int: __magic_name__ = 20 __magic_name__ = model_class_name(SCREAMING_SNAKE_CASE_ ) __magic_name__ = model.encode(inputs_dict["""input_ids"""] ) __magic_name__ , __magic_name__ = ( inputs_dict["""decoder_input_ids"""], inputs_dict["""decoder_attention_mask"""], ) __magic_name__ = model.init_cache(decoder_input_ids.shape[0] , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) __magic_name__ = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype="""i4""" ) __magic_name__ = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) __magic_name__ = model.decode( decoder_input_ids[:, :-1] , SCREAMING_SNAKE_CASE_ , decoder_attention_mask=SCREAMING_SNAKE_CASE_ , past_key_values=SCREAMING_SNAKE_CASE_ , decoder_position_ids=SCREAMING_SNAKE_CASE_ , ) __magic_name__ = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""" ) __magic_name__ = model.decode( decoder_input_ids[:, -1:] , SCREAMING_SNAKE_CASE_ , decoder_attention_mask=SCREAMING_SNAKE_CASE_ , past_key_values=outputs_cache.past_key_values , decoder_position_ids=SCREAMING_SNAKE_CASE_ , ) __magic_name__ = model.decode(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) __magic_name__ = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1E-3 , msg=F'''Max diff is {diff}''' ) def _lowercase ( self : Any , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : str , UpperCamelCase__ : List[str] ) -> Any: __magic_name__ = 20 __magic_name__ = model_class_name(SCREAMING_SNAKE_CASE_ ) __magic_name__ = model.encode(inputs_dict["""input_ids"""] ) __magic_name__ , __magic_name__ = ( inputs_dict["""decoder_input_ids"""], inputs_dict["""decoder_attention_mask"""], ) __magic_name__ = jnp.concatenate( [ decoder_attention_mask, jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ), ] , axis=-1 , ) __magic_name__ = model.init_cache(decoder_input_ids.shape[0] , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) __magic_name__ = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) __magic_name__ = model.decode( decoder_input_ids[:, :-1] , SCREAMING_SNAKE_CASE_ , decoder_attention_mask=SCREAMING_SNAKE_CASE_ , past_key_values=SCREAMING_SNAKE_CASE_ , decoder_position_ids=SCREAMING_SNAKE_CASE_ , ) __magic_name__ = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""" ) __magic_name__ = model.decode( decoder_input_ids[:, -1:] , SCREAMING_SNAKE_CASE_ , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=SCREAMING_SNAKE_CASE_ , decoder_position_ids=SCREAMING_SNAKE_CASE_ , ) __magic_name__ = model.decode(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , decoder_attention_mask=SCREAMING_SNAKE_CASE_ ) __magic_name__ = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1E-3 , msg=F'''Max diff is {diff}''' ) def a__ ( A_, A_, A_, A_=None, A_=None, ): '''simple docstring''' if attention_mask is None: __magic_name__ = np.not_equal(__a, config.pad_token_id ).astype(np.inta ) if decoder_attention_mask is None: __magic_name__ = np.concatenate( [ np.ones(decoder_input_ids[:, :1].shape, dtype=np.inta ), np.not_equal(decoder_input_ids[:, 1:], config.pad_token_id ).astype(np.inta ), ], axis=-1, ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, } @require_flax class UpperCAmelCase_ ( __lowerCamelCase , unittest.TestCase ): '''simple docstring''' a__ = ( ( FlaxPegasusForConditionalGeneration, FlaxPegasusModel, ) if is_flax_available() else () ) a__ = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else () a__ = True a__ = False a__ = False a__ = False def _lowercase ( self : List[Any] ) -> List[Any]: __magic_name__ = FlaxPegasusModelTester(self ) __magic_name__ = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE_ ) def _lowercase ( self : Tuple ) -> str: self.config_tester.run_common_tests() def _lowercase ( self : int ) -> List[Any]: __magic_name__ , __magic_name__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) def _lowercase ( self : List[str] ) -> str: __magic_name__ , __magic_name__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward_with_attn_mask(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) def _lowercase ( self : Union[str, Any] ) -> List[str]: __magic_name__ , __magic_name__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): __magic_name__ = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) __magic_name__ = model_class(SCREAMING_SNAKE_CASE_ ) @jax.jit def encode_jitted(UpperCamelCase__ : List[Any] , UpperCamelCase__ : Union[str, Any]=None , **UpperCamelCase__ : List[str] ): return model.encode(input_ids=SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ ) with self.subTest("""JIT Enabled""" ): __magic_name__ = encode_jitted(**SCREAMING_SNAKE_CASE_ ).to_tuple() with self.subTest("""JIT Disabled""" ): with jax.disable_jit(): __magic_name__ = encode_jitted(**SCREAMING_SNAKE_CASE_ ).to_tuple() self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , len(SCREAMING_SNAKE_CASE_ ) ) for jitted_output, output in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): self.assertEqual(jitted_output.shape , output.shape ) def _lowercase ( self : Optional[Any] ) -> List[str]: __magic_name__ , __magic_name__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): __magic_name__ = model_class(SCREAMING_SNAKE_CASE_ ) __magic_name__ = model.encode(inputs_dict["""input_ids"""] , inputs_dict["""attention_mask"""] ) __magic_name__ = { """decoder_input_ids""": inputs_dict["""decoder_input_ids"""], """decoder_attention_mask""": inputs_dict["""decoder_attention_mask"""], """encoder_outputs""": encoder_outputs, } @jax.jit def decode_jitted(UpperCamelCase__ : List[Any] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Union[str, Any] ): return model.decode( decoder_input_ids=SCREAMING_SNAKE_CASE_ , decoder_attention_mask=SCREAMING_SNAKE_CASE_ , encoder_outputs=SCREAMING_SNAKE_CASE_ , ) with self.subTest("""JIT Enabled""" ): __magic_name__ = decode_jitted(**SCREAMING_SNAKE_CASE_ ).to_tuple() with self.subTest("""JIT Disabled""" ): with jax.disable_jit(): __magic_name__ = decode_jitted(**SCREAMING_SNAKE_CASE_ ).to_tuple() self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , len(SCREAMING_SNAKE_CASE_ ) ) for jitted_output, output in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): self.assertEqual(jitted_output.shape , output.shape ) @slow def _lowercase ( self : List[Any] ) -> List[Any]: for model_class_name in self.all_model_classes: __magic_name__ = model_class_name.from_pretrained("""google/pegasus-large""" , from_pt=SCREAMING_SNAKE_CASE_ ) __magic_name__ = np.ones((1, 1) ) __magic_name__ = model(SCREAMING_SNAKE_CASE_ ) self.assertIsNotNone(SCREAMING_SNAKE_CASE_ ) @slow def _lowercase ( self : Tuple ) -> Union[str, Any]: __magic_name__ = FlaxPegasusForConditionalGeneration.from_pretrained("""google/pegasus-xsum""" ) __magic_name__ = PegasusTokenizer.from_pretrained("""google/pegasus-xsum""" ) __magic_name__ = [ """ PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.""", """ The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" """, ] __magic_name__ = [ """California's largest electricity provider has turned off power to hundreds of thousands of customers.""", """Pop group N-Dubz have revealed they were surprised to get four nominations for this year's Mobo Awards.""", ] __magic_name__ = tokenizer(SCREAMING_SNAKE_CASE_ , return_tensors="""np""" , truncation=SCREAMING_SNAKE_CASE_ , max_length=512 , padding=SCREAMING_SNAKE_CASE_ ) __magic_name__ = model.generate(**SCREAMING_SNAKE_CASE_ , num_beams=2 ).sequences __magic_name__ = tokenizer.batch_decode(SCREAMING_SNAKE_CASE_ , skip_special_tokens=SCREAMING_SNAKE_CASE_ ) assert tgt_text == decoded
719
from collections import deque from .hash_table import HashTable class UpperCAmelCase_ ( _A ): '''simple docstring''' def __init__( self : int , *UpperCamelCase__ : Union[str, Any] , **UpperCamelCase__ : Optional[Any] ) -> Optional[Any]: """simple docstring""" super().__init__(*UpperCamelCase__ , **UpperCamelCase__ ) def _lowercase ( self : Optional[Any] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Any ) -> Dict: """simple docstring""" __magic_name__ = deque([] ) if self.values[key] is None else self.values[key] self.values[key].appendleft(UpperCamelCase__ ) __magic_name__ = self.values[key] def _lowercase ( self : List[str] ) -> int: """simple docstring""" return ( sum(self.charge_factor - len(UpperCamelCase__ ) for slot in self.values ) / self.size_table * self.charge_factor ) def _lowercase ( self : Optional[Any] , UpperCamelCase__ : str , UpperCamelCase__ : Tuple=None ) -> str: """simple docstring""" if not ( len(self.values[key] ) == self.charge_factor and self.values.count(UpperCamelCase__ ) == 0 ): return key return super()._collision_resolution(UpperCamelCase__ , UpperCamelCase__ )
76
0
import argparse import os import shutil import torch from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer def a__ ( A_ ): '''simple docstring''' __magic_name__ = args.pruning_method __magic_name__ = args.threshold __magic_name__ = args.model_name_or_path.rstrip("""/""" ) __magic_name__ = args.target_model_path print(f'''Load fine-pruned model from {model_name_or_path}''' ) __magic_name__ = torch.load(os.path.join(A_, """pytorch_model.bin""" ) ) __magic_name__ = {} for name, tensor in model.items(): if "embeddings" in name or "LayerNorm" in name or "pooler" in name: __magic_name__ = tensor print(f'''Copied layer {name}''' ) elif "classifier" in name or "qa_output" in name: __magic_name__ = tensor print(f'''Copied layer {name}''' ) elif "bias" in name: __magic_name__ = tensor print(f'''Copied layer {name}''' ) else: if pruning_method == "magnitude": __magic_name__ = MagnitudeBinarizer.apply(inputs=A_, threshold=A_ ) __magic_name__ = tensor * mask print(f'''Pruned layer {name}''' ) elif pruning_method == "topK": if "mask_scores" in name: continue __magic_name__ = name[:-6] __magic_name__ = model[f'''{prefix_}mask_scores'''] __magic_name__ = TopKBinarizer.apply(A_, A_ ) __magic_name__ = tensor * mask print(f'''Pruned layer {name}''' ) elif pruning_method == "sigmoied_threshold": if "mask_scores" in name: continue __magic_name__ = name[:-6] __magic_name__ = model[f'''{prefix_}mask_scores'''] __magic_name__ = ThresholdBinarizer.apply(A_, A_, A_ ) __magic_name__ = tensor * mask print(f'''Pruned layer {name}''' ) elif pruning_method == "l0": if "mask_scores" in name: continue __magic_name__ = name[:-6] __magic_name__ = model[f'''{prefix_}mask_scores'''] __magic_name__ = -0.1, 1.1 __magic_name__ = torch.sigmoid(A_ ) __magic_name__ = s * (r - l) + l __magic_name__ = s_bar.clamp(min=0.0, max=1.0 ) __magic_name__ = tensor * mask print(f'''Pruned layer {name}''' ) else: raise ValueError("""Unknown pruning method""" ) if target_model_path is None: __magic_name__ = os.path.join( os.path.dirname(A_ ), f'''bertarized_{os.path.basename(A_ )}''' ) if not os.path.isdir(A_ ): shutil.copytree(A_, A_ ) print(f'''\nCreated folder {target_model_path}''' ) torch.save(A_, os.path.join(A_, """pytorch_model.bin""" ) ) print("""\nPruned model saved! See you later!""" ) if __name__ == "__main__": __lowerCAmelCase : Optional[int] = argparse.ArgumentParser() parser.add_argument( '--pruning_method', choices=['l0', 'magnitude', 'topK', 'sigmoied_threshold'], type=str, required=True, help=( 'Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,' ' sigmoied_threshold = Soft movement pruning)' ), ) parser.add_argument( '--threshold', type=float, required=False, help=( 'For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model.' 'For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared.' 'Not needed for `l0`' ), ) parser.add_argument( '--model_name_or_path', type=str, required=True, help='Folder containing the model that was previously fine-pruned', ) parser.add_argument( '--target_model_path', default=None, type=str, required=False, help='Folder containing the model that was previously fine-pruned', ) __lowerCAmelCase : Optional[int] = parser.parse_args() main(args)
720
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import os from ...utils.constants import SAGEMAKER_PARALLEL_EC2_INSTANCES, TORCH_DYNAMO_MODES from ...utils.dataclasses import ComputeEnvironment, SageMakerDistributedType from ...utils.imports import is_botoa_available from .config_args import SageMakerConfig from .config_utils import ( DYNAMO_BACKENDS, _ask_field, _ask_options, _convert_dynamo_backend, _convert_mixed_precision, _convert_sagemaker_distributed_mode, _convert_yes_no_to_bool, ) if is_botoa_available(): import botoa # noqa: F401 def a__ ( A_ ): '''simple docstring''' __magic_name__ = botoa.client("""iam""" ) __magic_name__ = { """Version""": """2012-10-17""", """Statement""": [ {"""Effect""": """Allow""", """Principal""": {"""Service""": """sagemaker.amazonaws.com"""}, """Action""": """sts:AssumeRole"""} ], } try: # create the role, associated with the chosen trust policy iam_client.create_role( RoleName=A_, AssumeRolePolicyDocument=json.dumps(A_, indent=2 ) ) __magic_name__ = { """Version""": """2012-10-17""", """Statement""": [ { """Effect""": """Allow""", """Action""": [ """sagemaker:*""", """ecr:GetDownloadUrlForLayer""", """ecr:BatchGetImage""", """ecr:BatchCheckLayerAvailability""", """ecr:GetAuthorizationToken""", """cloudwatch:PutMetricData""", """cloudwatch:GetMetricData""", """cloudwatch:GetMetricStatistics""", """cloudwatch:ListMetrics""", """logs:CreateLogGroup""", """logs:CreateLogStream""", """logs:DescribeLogStreams""", """logs:PutLogEvents""", """logs:GetLogEvents""", """s3:CreateBucket""", """s3:ListBucket""", """s3:GetBucketLocation""", """s3:GetObject""", """s3:PutObject""", ], """Resource""": """*""", } ], } # attach policy to role iam_client.put_role_policy( RoleName=A_, PolicyName=f'''{role_name}_policy_permission''', PolicyDocument=json.dumps(A_, indent=2 ), ) except iam_client.exceptions.EntityAlreadyExistsException: print(f'''role {role_name} already exists. Using existing one''' ) def a__ ( A_ ): '''simple docstring''' __magic_name__ = botoa.client("""iam""" ) return iam_client.get_role(RoleName=A_ )["Role"]["Arn"] def a__ ( ): '''simple docstring''' __magic_name__ = _ask_options( """How do you want to authorize?""", ["""AWS Profile""", """Credentials (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY) """], A_, ) __magic_name__ = None if credentials_configuration == 0: __magic_name__ = _ask_field("""Enter your AWS Profile name: [default] """, default="""default""" ) __magic_name__ = aws_profile else: print( """Note you will need to provide AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY when you launch you training script with,""" """`accelerate launch --aws_access_key_id XXX --aws_secret_access_key YYY`""" ) __magic_name__ = _ask_field("""AWS Access Key ID: """ ) __magic_name__ = aws_access_key_id __magic_name__ = _ask_field("""AWS Secret Access Key: """ ) __magic_name__ = aws_secret_access_key __magic_name__ = _ask_field("""Enter your AWS Region: [us-east-1]""", default="""us-east-1""" ) __magic_name__ = aws_region __magic_name__ = _ask_options( """Do you already have an IAM Role for executing Amazon SageMaker Training Jobs?""", ["""Provide IAM Role name""", """Create new IAM role using credentials"""], A_, ) if role_management == 0: __magic_name__ = _ask_field("""Enter your IAM role name: """ ) else: __magic_name__ = """accelerate_sagemaker_execution_role""" print(f'''Accelerate will create an iam role "{iam_role_name}" using the provided credentials''' ) _create_iam_role_for_sagemaker(A_ ) __magic_name__ = _ask_field( """Do you want to use custom Docker image? [yes/NO]: """, _convert_yes_no_to_bool, default=A_, error_message="""Please enter yes or no.""", ) __magic_name__ = None if is_custom_docker_image: __magic_name__ = _ask_field("""Enter your Docker image: """, lambda A_ : str(A_ ).lower() ) __magic_name__ = _ask_field( """Do you want to provide SageMaker input channels with data locations? [yes/NO]: """, _convert_yes_no_to_bool, default=A_, error_message="""Please enter yes or no.""", ) __magic_name__ = None if is_sagemaker_inputs_enabled: __magic_name__ = _ask_field( """Enter the path to the SageMaker inputs TSV file with columns (channel_name, data_location): """, lambda A_ : str(A_ ).lower(), ) __magic_name__ = _ask_field( """Do you want to enable SageMaker metrics? [yes/NO]: """, _convert_yes_no_to_bool, default=A_, error_message="""Please enter yes or no.""", ) __magic_name__ = None if is_sagemaker_metrics_enabled: __magic_name__ = _ask_field( """Enter the path to the SageMaker metrics TSV file with columns (metric_name, metric_regex): """, lambda A_ : str(A_ ).lower(), ) __magic_name__ = _ask_options( """What is the distributed mode?""", ["""No distributed training""", """Data parallelism"""], _convert_sagemaker_distributed_mode, ) __magic_name__ = {} __magic_name__ = _ask_field( """Do you wish to optimize your script with torch dynamo?[yes/NO]:""", _convert_yes_no_to_bool, default=A_, error_message="""Please enter yes or no.""", ) if use_dynamo: __magic_name__ = """dynamo_""" __magic_name__ = _ask_options( """Which dynamo backend would you like to use?""", [x.lower() for x in DYNAMO_BACKENDS], _convert_dynamo_backend, default=2, ) __magic_name__ = _ask_field( """Do you want to customize the defaults sent to torch.compile? [yes/NO]: """, _convert_yes_no_to_bool, default=A_, error_message="""Please enter yes or no.""", ) if use_custom_options: __magic_name__ = _ask_options( """Which mode do you want to use?""", A_, lambda A_ : TORCH_DYNAMO_MODES[int(A_ )], default="""default""", ) __magic_name__ = _ask_field( """Do you want the fullgraph mode or it is ok to break model into several subgraphs? [yes/NO]: """, _convert_yes_no_to_bool, default=A_, error_message="""Please enter yes or no.""", ) __magic_name__ = _ask_field( """Do you want to enable dynamic shape tracing? [yes/NO]: """, _convert_yes_no_to_bool, default=A_, error_message="""Please enter yes or no.""", ) __magic_name__ = """Which EC2 instance type you want to use for your training?""" if distributed_type != SageMakerDistributedType.NO: __magic_name__ = _ask_options( A_, A_, lambda A_ : SAGEMAKER_PARALLEL_EC2_INSTANCES[int(A_ )] ) else: eca_instance_query += "? [ml.p3.2xlarge]:" __magic_name__ = _ask_field(A_, lambda A_ : str(A_ ).lower(), default="""ml.p3.2xlarge""" ) __magic_name__ = 1 if distributed_type in (SageMakerDistributedType.DATA_PARALLEL, SageMakerDistributedType.MODEL_PARALLEL): __magic_name__ = _ask_field( """How many machines do you want use? [1]: """, A_, default=1, ) __magic_name__ = _ask_options( """Do you wish to use FP16 or BF16 (mixed precision)?""", ["""no""", """fp16""", """bf16""", """fp8"""], _convert_mixed_precision, ) if use_dynamo and mixed_precision == "no": print( """Torch dynamo used without mixed precision requires TF32 to be efficient. Accelerate will enable it by default when launching your scripts.""" ) return SageMakerConfig( image_uri=A_, compute_environment=ComputeEnvironment.AMAZON_SAGEMAKER, distributed_type=A_, use_cpu=A_, dynamo_config=A_, eca_instance_type=A_, profile=A_, region=A_, iam_role_name=A_, mixed_precision=A_, num_machines=A_, sagemaker_inputs_file=A_, sagemaker_metrics_file=A_, )
76
0
from json import JSONDecodeError # Workaround for requests.exceptions.JSONDecodeError import requests def a__ ( A_ = "isbn/0140328726" ): '''simple docstring''' __magic_name__ = olid.strip().strip("""/""" ) # Remove leading/trailing whitespace & slashes if new_olid.count("""/""" ) != 1: __magic_name__ = f'''{olid} is not a valid Open Library olid''' raise ValueError(A_ ) return requests.get(f'''https://openlibrary.org/{new_olid}.json''' ).json() def a__ ( A_ ): '''simple docstring''' __magic_name__ = { '''title''': '''Title''', '''publish_date''': '''Publish date''', '''authors''': '''Authors''', '''number_of_pages''': '''Number of pages:''', '''first_sentence''': '''First sentence''', '''isbn_10''': '''ISBN (10)''', '''isbn_13''': '''ISBN (13)''', } __magic_name__ = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()} __magic_name__ = [ get_openlibrary_data(author["""key"""] )['''name'''] for author in data['''Authors'''] ] __magic_name__ = data['''First sentence''']['''value'''] for key, value in data.items(): if isinstance(A_, A_ ): __magic_name__ = ''', '''.join(A_ ) return data if __name__ == "__main__": import doctest doctest.testmod() while True: __lowerCAmelCase : Dict = input('\nEnter the ISBN code to search (or \'quit\' to stop): ').strip() if isbn.lower() in ("", "q", "quit", "exit", "stop"): break if len(isbn) not in (10, 13) or not isbn.isdigit(): print(F'''Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.''') continue print(F'''\nSearching Open Library for ISBN: {isbn}...\n''') try: __lowerCAmelCase : Union[str, Any] = summarize_book(get_openlibrary_data(F'''isbn/{isbn}''')) print('\n'.join(F'''{key}: {value}''' for key, value in book_summary.items())) except JSONDecodeError: # Workaround for requests.exceptions.RequestException: print(F'''Sorry, there are no results for ISBN: {isbn}.''')
721
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, convert_to_rgb, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging __lowerCAmelCase : Dict = logging.get_logger(__name__) if is_vision_available(): import PIL class UpperCAmelCase_ ( _A ): '''simple docstring''' a__ = ["""pixel_values"""] def __init__( self : Optional[Any] , UpperCamelCase__ : bool = True , UpperCamelCase__ : Dict[str, int] = None , UpperCamelCase__ : PILImageResampling = PILImageResampling.BICUBIC , UpperCamelCase__ : bool = True , UpperCamelCase__ : Dict[str, int] = None , UpperCamelCase__ : bool = True , UpperCamelCase__ : Union[int, float] = 1 / 255 , UpperCamelCase__ : bool = True , UpperCamelCase__ : Optional[Union[float, List[float]]] = None , UpperCamelCase__ : Optional[Union[float, List[float]]] = None , UpperCamelCase__ : bool = True , **UpperCamelCase__ : int , ) -> None: """simple docstring""" super().__init__(**UpperCamelCase__ ) __magic_name__ = size if size is not None else {"""shortest_edge""": 224} __magic_name__ = get_size_dict(UpperCamelCase__ , default_to_square=UpperCamelCase__ ) __magic_name__ = crop_size if crop_size is not None else {"""height""": 224, """width""": 224} __magic_name__ = get_size_dict(UpperCamelCase__ , default_to_square=UpperCamelCase__ , param_name="""crop_size""" ) __magic_name__ = do_resize __magic_name__ = size __magic_name__ = resample __magic_name__ = do_center_crop __magic_name__ = crop_size __magic_name__ = do_rescale __magic_name__ = rescale_factor __magic_name__ = do_normalize __magic_name__ = image_mean if image_mean is not None else OPENAI_CLIP_MEAN __magic_name__ = image_std if image_std is not None else OPENAI_CLIP_STD __magic_name__ = do_convert_rgb def _lowercase ( self : Union[str, Any] , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : Dict[str, int] , UpperCamelCase__ : PILImageResampling = PILImageResampling.BICUBIC , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : Tuple , ) -> np.ndarray: """simple docstring""" __magic_name__ = get_size_dict(UpperCamelCase__ , default_to_square=UpperCamelCase__ ) if "shortest_edge" not in size: raise ValueError(F'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''' ) __magic_name__ = get_resize_output_image_size(UpperCamelCase__ , size=size["""shortest_edge"""] , default_to_square=UpperCamelCase__ ) return resize(UpperCamelCase__ , size=UpperCamelCase__ , resample=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__ ) def _lowercase ( self : Tuple , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : Dict[str, int] , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : Tuple , ) -> np.ndarray: """simple docstring""" __magic_name__ = get_size_dict(UpperCamelCase__ ) if "height" not in size or "width" not in size: raise ValueError(F'''The `size` parameter must contain the keys (height, width). Got {size.keys()}''' ) return center_crop(UpperCamelCase__ , size=(size["""height"""], size["""width"""]) , data_format=UpperCamelCase__ , **UpperCamelCase__ ) def _lowercase ( self : Any , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : Union[int, float] , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : Optional[Any] , ) -> Optional[int]: """simple docstring""" return rescale(UpperCamelCase__ , scale=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__ ) def _lowercase ( self : Union[str, Any] , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : Union[float, List[float]] , UpperCamelCase__ : Union[float, List[float]] , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : Dict , ) -> np.ndarray: """simple docstring""" return normalize(UpperCamelCase__ , mean=UpperCamelCase__ , std=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__ ) def _lowercase ( self : List[Any] , UpperCamelCase__ : ImageInput , UpperCamelCase__ : bool = None , UpperCamelCase__ : Dict[str, int] = None , UpperCamelCase__ : PILImageResampling = None , UpperCamelCase__ : bool = None , UpperCamelCase__ : int = None , UpperCamelCase__ : bool = None , UpperCamelCase__ : float = None , UpperCamelCase__ : bool = None , UpperCamelCase__ : Optional[Union[float, List[float]]] = None , UpperCamelCase__ : Optional[Union[float, List[float]]] = None , UpperCamelCase__ : bool = None , UpperCamelCase__ : Optional[Union[str, TensorType]] = None , UpperCamelCase__ : Optional[ChannelDimension] = ChannelDimension.FIRST , **UpperCamelCase__ : Dict , ) -> PIL.Image.Image: """simple docstring""" __magic_name__ = do_resize if do_resize is not None else self.do_resize __magic_name__ = size if size is not None else self.size __magic_name__ = get_size_dict(UpperCamelCase__ , param_name="""size""" , default_to_square=UpperCamelCase__ ) __magic_name__ = resample if resample is not None else self.resample __magic_name__ = do_center_crop if do_center_crop is not None else self.do_center_crop __magic_name__ = crop_size if crop_size is not None else self.crop_size __magic_name__ = get_size_dict(UpperCamelCase__ , param_name="""crop_size""" , default_to_square=UpperCamelCase__ ) __magic_name__ = do_rescale if do_rescale is not None else self.do_rescale __magic_name__ = rescale_factor if rescale_factor is not None else self.rescale_factor __magic_name__ = do_normalize if do_normalize is not None else self.do_normalize __magic_name__ = image_mean if image_mean is not None else self.image_mean __magic_name__ = image_std if image_std is not None else self.image_std __magic_name__ = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb __magic_name__ = make_list_of_images(UpperCamelCase__ ) if not valid_images(UpperCamelCase__ ): raise ValueError( """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """ """torch.Tensor, tf.Tensor or jax.ndarray.""" ) if do_resize and size is None: raise ValueError("""Size must be specified if do_resize is True.""" ) if do_center_crop and crop_size is None: raise ValueError("""Crop size must be specified if do_center_crop is True.""" ) if do_rescale and rescale_factor is None: raise ValueError("""Rescale factor must be specified if do_rescale is True.""" ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("""Image mean and std must be specified if do_normalize is True.""" ) # PIL RGBA images are converted to RGB if do_convert_rgb: __magic_name__ = [convert_to_rgb(UpperCamelCase__ ) for image in images] # All transformations expect numpy arrays. __magic_name__ = [to_numpy_array(UpperCamelCase__ ) for image in images] if do_resize: __magic_name__ = [self.resize(image=UpperCamelCase__ , size=UpperCamelCase__ , resample=UpperCamelCase__ ) for image in images] if do_center_crop: __magic_name__ = [self.center_crop(image=UpperCamelCase__ , size=UpperCamelCase__ ) for image in images] if do_rescale: __magic_name__ = [self.rescale(image=UpperCamelCase__ , scale=UpperCamelCase__ ) for image in images] if do_normalize: __magic_name__ = [self.normalize(image=UpperCamelCase__ , mean=UpperCamelCase__ , std=UpperCamelCase__ ) for image in images] __magic_name__ = [to_channel_dimension_format(UpperCamelCase__ , UpperCamelCase__ ) for image in images] __magic_name__ = {"""pixel_values""": images} return BatchFeature(data=UpperCamelCase__ , tensor_type=UpperCamelCase__ )
76
0
from __future__ import annotations import unittest from transformers import FunnelConfig, is_tf_available from transformers.testing_utils import require_tf from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFFunnelBaseModel, TFFunnelForMaskedLM, TFFunnelForMultipleChoice, TFFunnelForPreTraining, TFFunnelForQuestionAnswering, TFFunnelForSequenceClassification, TFFunnelForTokenClassification, TFFunnelModel, ) class UpperCAmelCase_ : '''simple docstring''' def __init__( self : Any , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : str=13 , UpperCamelCase__ : Optional[Any]=7 , UpperCamelCase__ : List[str]=True , UpperCamelCase__ : Dict=True , UpperCamelCase__ : Any=True , UpperCamelCase__ : List[Any]=True , UpperCamelCase__ : Dict=99 , UpperCamelCase__ : Any=[1, 1, 2] , UpperCamelCase__ : Tuple=1 , UpperCamelCase__ : Optional[Any]=32 , UpperCamelCase__ : List[str]=4 , UpperCamelCase__ : str=8 , UpperCamelCase__ : Union[str, Any]=37 , UpperCamelCase__ : Optional[Any]="gelu_new" , UpperCamelCase__ : Optional[int]=0.1 , UpperCamelCase__ : List[str]=0.1 , UpperCamelCase__ : Any=0.0 , UpperCamelCase__ : str=512 , UpperCamelCase__ : List[str]=3 , UpperCamelCase__ : Union[str, Any]=0.02 , UpperCamelCase__ : Any=3 , UpperCamelCase__ : Any=4 , UpperCamelCase__ : int=None , UpperCamelCase__ : List[str]=False , ) -> int: """simple docstring""" __magic_name__ = parent __magic_name__ = batch_size __magic_name__ = seq_length __magic_name__ = is_training __magic_name__ = use_input_mask __magic_name__ = use_token_type_ids __magic_name__ = use_labels __magic_name__ = vocab_size __magic_name__ = block_sizes __magic_name__ = num_decoder_layers __magic_name__ = d_model __magic_name__ = n_head __magic_name__ = d_head __magic_name__ = d_inner __magic_name__ = hidden_act __magic_name__ = hidden_dropout __magic_name__ = attention_dropout __magic_name__ = activation_dropout __magic_name__ = max_position_embeddings __magic_name__ = type_vocab_size __magic_name__ = 2 __magic_name__ = num_labels __magic_name__ = num_choices __magic_name__ = scope __magic_name__ = initializer_std # Used in the tests to check the size of the first attention layer __magic_name__ = n_head # Used in the tests to check the size of the first hidden state __magic_name__ = self.d_model # Used in the tests to check the number of output hidden states/attentions __magic_name__ = sum(self.block_sizes ) + (0 if base else self.num_decoder_layers) # FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with # the last hidden state of the first block (which is the first hidden state of the decoder). if not base: __magic_name__ = self.num_hidden_layers + 2 def _lowercase ( self : List[Any] ) -> Any: """simple docstring""" __magic_name__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __magic_name__ = None if self.use_input_mask: __magic_name__ = random_attention_mask([self.batch_size, self.seq_length] ) __magic_name__ = None if self.use_token_type_ids: __magic_name__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __magic_name__ = None __magic_name__ = None __magic_name__ = None if self.use_labels: __magic_name__ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __magic_name__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __magic_name__ = ids_tensor([self.batch_size] , self.num_choices ) __magic_name__ = FunnelConfig( vocab_size=self.vocab_size , block_sizes=self.block_sizes , num_decoder_layers=self.num_decoder_layers , d_model=self.d_model , n_head=self.n_head , d_head=self.d_head , d_inner=self.d_inner , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , activation_dropout=self.activation_dropout , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_std=self.initializer_std , ) return ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) def _lowercase ( self : Any , UpperCamelCase__ : List[Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : str , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : str , ) -> Dict: """simple docstring""" __magic_name__ = TFFunnelModel(config=_lowerCamelCase ) __magic_name__ = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} __magic_name__ = model(_lowerCamelCase ) __magic_name__ = [input_ids, input_mask] __magic_name__ = model(_lowerCamelCase ) __magic_name__ = model(_lowerCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) ) __magic_name__ = False __magic_name__ = TFFunnelModel(config=_lowerCamelCase ) __magic_name__ = model(_lowerCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) ) __magic_name__ = False __magic_name__ = TFFunnelModel(config=_lowerCamelCase ) __magic_name__ = model(_lowerCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) ) def _lowercase ( self : Optional[Any] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Dict , UpperCamelCase__ : Dict , ) -> Union[str, Any]: """simple docstring""" __magic_name__ = TFFunnelBaseModel(config=_lowerCamelCase ) __magic_name__ = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} __magic_name__ = model(_lowerCamelCase ) __magic_name__ = [input_ids, input_mask] __magic_name__ = model(_lowerCamelCase ) __magic_name__ = model(_lowerCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) ) __magic_name__ = False __magic_name__ = TFFunnelBaseModel(config=_lowerCamelCase ) __magic_name__ = model(_lowerCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 3, self.d_model) ) __magic_name__ = False __magic_name__ = TFFunnelBaseModel(config=_lowerCamelCase ) __magic_name__ = model(_lowerCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) ) def _lowercase ( self : Tuple , UpperCamelCase__ : Any , UpperCamelCase__ : Any , UpperCamelCase__ : str , UpperCamelCase__ : int , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : int , UpperCamelCase__ : Union[str, Any] , ) -> Dict: """simple docstring""" __magic_name__ = TFFunnelForPreTraining(config=_lowerCamelCase ) __magic_name__ = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} __magic_name__ = model(_lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length) ) def _lowercase ( self : Optional[Any] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Any , UpperCamelCase__ : Tuple , UpperCamelCase__ : Dict , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Dict , ) -> Tuple: """simple docstring""" __magic_name__ = TFFunnelForMaskedLM(config=_lowerCamelCase ) __magic_name__ = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} __magic_name__ = model(_lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def _lowercase ( self : Any , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : int , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Dict , UpperCamelCase__ : List[str] , UpperCamelCase__ : Optional[int] , ) -> int: """simple docstring""" __magic_name__ = self.num_labels __magic_name__ = TFFunnelForSequenceClassification(config=_lowerCamelCase ) __magic_name__ = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} __magic_name__ = model(_lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def _lowercase ( self : str , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : Dict , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : Union[str, Any] , ) -> List[Any]: """simple docstring""" __magic_name__ = self.num_choices __magic_name__ = TFFunnelForMultipleChoice(config=_lowerCamelCase ) __magic_name__ = tf.tile(tf.expand_dims(_lowerCamelCase , 1 ) , (1, self.num_choices, 1) ) __magic_name__ = tf.tile(tf.expand_dims(_lowerCamelCase , 1 ) , (1, self.num_choices, 1) ) __magic_name__ = tf.tile(tf.expand_dims(_lowerCamelCase , 1 ) , (1, self.num_choices, 1) ) __magic_name__ = { """input_ids""": multiple_choice_inputs_ids, """attention_mask""": multiple_choice_input_mask, """token_type_ids""": multiple_choice_token_type_ids, } __magic_name__ = model(_lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def _lowercase ( self : int , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : Dict , UpperCamelCase__ : Any , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Union[str, Any] , ) -> Union[str, Any]: """simple docstring""" __magic_name__ = self.num_labels __magic_name__ = TFFunnelForTokenClassification(config=_lowerCamelCase ) __magic_name__ = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} __magic_name__ = model(_lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def _lowercase ( self : List[Any] , UpperCamelCase__ : Any , UpperCamelCase__ : Dict , UpperCamelCase__ : Tuple , UpperCamelCase__ : Dict , UpperCamelCase__ : int , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Union[str, Any] , ) -> List[str]: """simple docstring""" __magic_name__ = TFFunnelForQuestionAnswering(config=_lowerCamelCase ) __magic_name__ = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} __magic_name__ = model(_lowerCamelCase ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def _lowercase ( self : Dict ) -> Tuple: """simple docstring""" __magic_name__ = self.prepare_config_and_inputs() ( ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ) = config_and_inputs __magic_name__ = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask} return config, inputs_dict @require_tf class UpperCAmelCase_ ( __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase ): '''simple docstring''' a__ = ( ( TFFunnelModel, TFFunnelForMaskedLM, TFFunnelForPreTraining, TFFunnelForQuestionAnswering, TFFunnelForTokenClassification, ) if is_tf_available() else () ) a__ = ( { "feature-extraction": (TFFunnelBaseModel, TFFunnelModel), "fill-mask": TFFunnelForMaskedLM, "question-answering": TFFunnelForQuestionAnswering, "text-classification": TFFunnelForSequenceClassification, "token-classification": TFFunnelForTokenClassification, "zero-shot": TFFunnelForSequenceClassification, } if is_tf_available() else {} ) a__ = False a__ = False def _lowercase ( self : List[str] ) -> Dict: """simple docstring""" __magic_name__ = TFFunnelModelTester(self ) __magic_name__ = ConfigTester(self , config_class=_lowerCamelCase ) def _lowercase ( self : Tuple ) -> Tuple: """simple docstring""" self.config_tester.run_common_tests() def _lowercase ( self : int ) -> int: """simple docstring""" __magic_name__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_lowerCamelCase ) def _lowercase ( self : Union[str, Any] ) -> Tuple: """simple docstring""" __magic_name__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*_lowerCamelCase ) def _lowercase ( self : str ) -> Optional[int]: """simple docstring""" __magic_name__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*_lowerCamelCase ) def _lowercase ( self : int ) -> Optional[Any]: """simple docstring""" __magic_name__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*_lowerCamelCase ) def _lowercase ( self : Optional[int] ) -> Any: """simple docstring""" __magic_name__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*_lowerCamelCase ) @require_tf class UpperCAmelCase_ ( __UpperCAmelCase , unittest.TestCase ): '''simple docstring''' a__ = ( (TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else () ) a__ = False a__ = False def _lowercase ( self : Optional[Any] ) -> str: """simple docstring""" __magic_name__ = TFFunnelModelTester(self , base=_lowerCamelCase ) __magic_name__ = ConfigTester(self , config_class=_lowerCamelCase ) def _lowercase ( self : Union[str, Any] ) -> Dict: """simple docstring""" self.config_tester.run_common_tests() def _lowercase ( self : List[str] ) -> Optional[Any]: """simple docstring""" __magic_name__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_base_model(*_lowerCamelCase ) def _lowercase ( self : List[str] ) -> Optional[Any]: """simple docstring""" __magic_name__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*_lowerCamelCase ) def _lowercase ( self : Dict ) -> Tuple: """simple docstring""" __magic_name__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*_lowerCamelCase )
700
import unittest from transformers import AutoTokenizer, NystromformerConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( NystromformerForMaskedLM, NystromformerForMultipleChoice, NystromformerForQuestionAnswering, NystromformerForSequenceClassification, NystromformerForTokenClassification, NystromformerModel, ) from transformers.models.nystromformer.modeling_nystromformer import NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST class UpperCAmelCase_ : '''simple docstring''' def __init__( self : Any , UpperCamelCase__ : Any , UpperCamelCase__ : Tuple=13 , UpperCamelCase__ : Dict=7 , UpperCamelCase__ : Dict=True , UpperCamelCase__ : List[str]=True , UpperCamelCase__ : Optional[int]=True , UpperCamelCase__ : int=True , UpperCamelCase__ : Optional[int]=99 , UpperCamelCase__ : List[Any]=32 , UpperCamelCase__ : Any=5 , UpperCamelCase__ : List[Any]=4 , UpperCamelCase__ : str=37 , UpperCamelCase__ : Any="gelu" , UpperCamelCase__ : int=0.1 , UpperCamelCase__ : Tuple=0.1 , UpperCamelCase__ : Dict=512 , UpperCamelCase__ : Union[str, Any]=16 , UpperCamelCase__ : Dict=2 , UpperCamelCase__ : Any=0.02 , UpperCamelCase__ : Any=3 , UpperCamelCase__ : Dict=4 , UpperCamelCase__ : List[Any]=None , ) -> Union[str, Any]: """simple docstring""" __magic_name__ = parent __magic_name__ = batch_size __magic_name__ = seq_length __magic_name__ = is_training __magic_name__ = use_input_mask __magic_name__ = use_token_type_ids __magic_name__ = use_labels __magic_name__ = vocab_size __magic_name__ = hidden_size __magic_name__ = num_hidden_layers __magic_name__ = num_attention_heads __magic_name__ = intermediate_size __magic_name__ = hidden_act __magic_name__ = hidden_dropout_prob __magic_name__ = attention_probs_dropout_prob __magic_name__ = max_position_embeddings __magic_name__ = type_vocab_size __magic_name__ = type_sequence_label_size __magic_name__ = initializer_range __magic_name__ = num_labels __magic_name__ = num_choices __magic_name__ = scope def _lowercase ( self : Any ) -> Any: """simple docstring""" __magic_name__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __magic_name__ = None if self.use_input_mask: __magic_name__ = random_attention_mask([self.batch_size, self.seq_length] ) __magic_name__ = None if self.use_token_type_ids: __magic_name__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __magic_name__ = None __magic_name__ = None __magic_name__ = None if self.use_labels: __magic_name__ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __magic_name__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __magic_name__ = ids_tensor([self.batch_size] , self.num_choices ) __magic_name__ = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def _lowercase ( self : Tuple ) -> Any: """simple docstring""" return NystromformerConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCamelCase__ , initializer_range=self.initializer_range , ) def _lowercase ( self : Any , UpperCamelCase__ : Tuple , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : str , UpperCamelCase__ : int , UpperCamelCase__ : str , UpperCamelCase__ : Dict , UpperCamelCase__ : str ) -> Tuple: """simple docstring""" __magic_name__ = NystromformerModel(config=UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() __magic_name__ = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ ) __magic_name__ = model(UpperCamelCase__ , token_type_ids=UpperCamelCase__ ) __magic_name__ = model(UpperCamelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _lowercase ( self : Optional[int] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : str , UpperCamelCase__ : List[str] , UpperCamelCase__ : List[str] ) -> str: """simple docstring""" __magic_name__ = NystromformerForMaskedLM(config=UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() __magic_name__ = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , labels=UpperCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def _lowercase ( self : Tuple , UpperCamelCase__ : Any , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Dict , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Any , UpperCamelCase__ : Any ) -> Optional[Any]: """simple docstring""" __magic_name__ = NystromformerForQuestionAnswering(config=UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() __magic_name__ = model( UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , start_positions=UpperCamelCase__ , end_positions=UpperCamelCase__ , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def _lowercase ( self : Optional[Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : List[str] , UpperCamelCase__ : List[str] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Dict , UpperCamelCase__ : Dict , UpperCamelCase__ : Any ) -> Optional[int]: """simple docstring""" __magic_name__ = self.num_labels __magic_name__ = NystromformerForSequenceClassification(UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() __magic_name__ = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , labels=UpperCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def _lowercase ( self : Any , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : str , UpperCamelCase__ : Any ) -> Dict: """simple docstring""" __magic_name__ = self.num_labels __magic_name__ = NystromformerForTokenClassification(config=UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() __magic_name__ = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , labels=UpperCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def _lowercase ( self : Union[str, Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Tuple , UpperCamelCase__ : int , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : int , UpperCamelCase__ : int ) -> Optional[Any]: """simple docstring""" __magic_name__ = self.num_choices __magic_name__ = NystromformerForMultipleChoice(config=UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() __magic_name__ = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __magic_name__ = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __magic_name__ = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __magic_name__ = model( UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , labels=UpperCamelCase__ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def _lowercase ( self : int ) -> List[Any]: """simple docstring""" __magic_name__ = self.prepare_config_and_inputs() ( ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ) = config_and_inputs __magic_name__ = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask} return config, inputs_dict @require_torch class UpperCAmelCase_ ( _A , _A , unittest.TestCase ): '''simple docstring''' a__ = ( ( NystromformerModel, NystromformerForMaskedLM, NystromformerForMultipleChoice, NystromformerForQuestionAnswering, NystromformerForSequenceClassification, NystromformerForTokenClassification, ) if is_torch_available() else () ) a__ = ( { """feature-extraction""": NystromformerModel, """fill-mask""": NystromformerForMaskedLM, """question-answering""": NystromformerForQuestionAnswering, """text-classification""": NystromformerForSequenceClassification, """token-classification""": NystromformerForTokenClassification, """zero-shot""": NystromformerForSequenceClassification, } if is_torch_available() else {} ) a__ = False a__ = False def _lowercase ( self : Union[str, Any] ) -> Union[str, Any]: """simple docstring""" __magic_name__ = NystromformerModelTester(self ) __magic_name__ = ConfigTester(self , config_class=UpperCamelCase__ , hidden_size=37 ) def _lowercase ( self : Tuple ) -> Any: """simple docstring""" self.config_tester.run_common_tests() def _lowercase ( self : Optional[Any] ) -> Any: """simple docstring""" __magic_name__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCamelCase__ ) def _lowercase ( self : Optional[Any] ) -> int: """simple docstring""" __magic_name__ = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: __magic_name__ = type self.model_tester.create_and_check_model(*UpperCamelCase__ ) def _lowercase ( self : List[Any] ) -> List[Any]: """simple docstring""" __magic_name__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*UpperCamelCase__ ) def _lowercase ( self : Union[str, Any] ) -> str: """simple docstring""" __magic_name__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*UpperCamelCase__ ) def _lowercase ( self : Dict ) -> List[Any]: """simple docstring""" __magic_name__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*UpperCamelCase__ ) def _lowercase ( self : str ) -> int: """simple docstring""" __magic_name__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*UpperCamelCase__ ) def _lowercase ( self : List[Any] ) -> List[str]: """simple docstring""" __magic_name__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*UpperCamelCase__ ) @slow def _lowercase ( self : str ) -> Tuple: """simple docstring""" for model_name in NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __magic_name__ = NystromformerModel.from_pretrained(UpperCamelCase__ ) self.assertIsNotNone(UpperCamelCase__ ) @require_torch class UpperCAmelCase_ ( unittest.TestCase ): '''simple docstring''' @slow def _lowercase ( self : Union[str, Any] ) -> Optional[Any]: """simple docstring""" __magic_name__ = NystromformerModel.from_pretrained("""uw-madison/nystromformer-512""" ) __magic_name__ = torch.tensor([[0, 1, 2, 3, 4, 5]] ) with torch.no_grad(): __magic_name__ = model(UpperCamelCase__ )[0] __magic_name__ = torch.Size((1, 6, 768) ) self.assertEqual(output.shape , UpperCamelCase__ ) __magic_name__ = torch.tensor( [[[-0.4532, -0.0936, 0.5137], [-0.2676, 0.0628, 0.6186], [-0.3629, -0.1726, 0.4716]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , UpperCamelCase__ , atol=1E-4 ) ) @slow def _lowercase ( self : int ) -> str: """simple docstring""" __magic_name__ = """the [MASK] of Belgium is Brussels""" __magic_name__ = AutoTokenizer.from_pretrained("""uw-madison/nystromformer-512""" ) __magic_name__ = NystromformerForMaskedLM.from_pretrained("""uw-madison/nystromformer-512""" ) __magic_name__ = tokenizer(UpperCamelCase__ , return_tensors="""pt""" ) with torch.no_grad(): __magic_name__ = model(encoding.input_ids ).logits __magic_name__ = token_logits[:, 2, :].argmax(-1 )[0] self.assertEqual(tokenizer.decode(UpperCamelCase__ ) , """capital""" )
76
0
from __future__ import annotations from random import choice def a__ ( A_ ): '''simple docstring''' return choice(lowerCamelCase_ ) def a__ ( A_, A_ ): '''simple docstring''' __magic_name__ = random_pivot(lowerCamelCase_ ) # partition based on pivot # linear time __magic_name__ = [e for e in lst if e < pivot] __magic_name__ = [e for e in lst if e > pivot] # if we get lucky, pivot might be the element we want. # we can easily see this: # small (elements smaller than k) # + pivot (kth element) # + big (elements larger than k) if len(lowerCamelCase_ ) == k - 1: return pivot # pivot is in elements bigger than k elif len(lowerCamelCase_ ) < k - 1: return kth_number(lowerCamelCase_, k - len(lowerCamelCase_ ) - 1 ) # pivot is in elements smaller than k else: return kth_number(lowerCamelCase_, lowerCamelCase_ ) if __name__ == "__main__": import doctest doctest.testmod()
701
from ...configuration_utils import PretrainedConfig from ...utils import logging __lowerCAmelCase : Tuple = logging.get_logger(__name__) __lowerCAmelCase : Union[str, Any] = { 'microsoft/cvt-13': 'https://huggingface.co/microsoft/cvt-13/resolve/main/config.json', # See all Cvt models at https://huggingface.co/models?filter=cvt } class UpperCAmelCase_ ( _A ): '''simple docstring''' a__ = """cvt""" def __init__( self : Dict , UpperCamelCase__ : Optional[int]=3 , UpperCamelCase__ : List[Any]=[7, 3, 3] , UpperCamelCase__ : Any=[4, 2, 2] , UpperCamelCase__ : Optional[Any]=[2, 1, 1] , UpperCamelCase__ : Union[str, Any]=[64, 192, 384] , UpperCamelCase__ : Dict=[1, 3, 6] , UpperCamelCase__ : Any=[1, 2, 10] , UpperCamelCase__ : List[str]=[4.0, 4.0, 4.0] , UpperCamelCase__ : Dict=[0.0, 0.0, 0.0] , UpperCamelCase__ : Tuple=[0.0, 0.0, 0.0] , UpperCamelCase__ : Optional[Any]=[0.0, 0.0, 0.1] , UpperCamelCase__ : str=[True, True, True] , UpperCamelCase__ : Optional[Any]=[False, False, True] , UpperCamelCase__ : Union[str, Any]=["dw_bn", "dw_bn", "dw_bn"] , UpperCamelCase__ : List[Any]=[3, 3, 3] , UpperCamelCase__ : Any=[1, 1, 1] , UpperCamelCase__ : Optional[int]=[2, 2, 2] , UpperCamelCase__ : Any=[1, 1, 1] , UpperCamelCase__ : List[str]=[1, 1, 1] , UpperCamelCase__ : int=0.02 , UpperCamelCase__ : int=1E-12 , **UpperCamelCase__ : int , ) -> Dict: """simple docstring""" super().__init__(**UpperCamelCase__ ) __magic_name__ = num_channels __magic_name__ = patch_sizes __magic_name__ = patch_stride __magic_name__ = patch_padding __magic_name__ = embed_dim __magic_name__ = num_heads __magic_name__ = depth __magic_name__ = mlp_ratio __magic_name__ = attention_drop_rate __magic_name__ = drop_rate __magic_name__ = drop_path_rate __magic_name__ = qkv_bias __magic_name__ = cls_token __magic_name__ = qkv_projection_method __magic_name__ = kernel_qkv __magic_name__ = padding_kv __magic_name__ = stride_kv __magic_name__ = padding_q __magic_name__ = stride_q __magic_name__ = initializer_range __magic_name__ = layer_norm_eps
76
0
import socket def a__ ( ): '''simple docstring''' __magic_name__ = socket.socket(socket.AF_INET, socket.SOCK_STREAM ) __magic_name__ = socket.gethostname() __magic_name__ = 12312 sock.connect((host, port) ) sock.send(b"""Hello server!""" ) with open("""Received_file""", """wb""" ) as out_file: print("""File opened""" ) print("""Receiving data...""" ) while True: __magic_name__ = sock.recv(1024 ) if not data: break out_file.write(UpperCamelCase__ ) print("""Successfully received the file""" ) sock.close() print("""Connection closed""" ) if __name__ == "__main__": main()
702
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available __lowerCAmelCase : List[str] = { 'configuration_canine': ['CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'CanineConfig'], 'tokenization_canine': ['CanineTokenizer'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCAmelCase : Optional[Any] = [ 'CANINE_PRETRAINED_MODEL_ARCHIVE_LIST', 'CanineForMultipleChoice', 'CanineForQuestionAnswering', 'CanineForSequenceClassification', 'CanineForTokenClassification', 'CanineLayer', 'CanineModel', 'CaninePreTrainedModel', 'load_tf_weights_in_canine', ] if TYPE_CHECKING: from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig from .tokenization_canine import CanineTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_canine import ( CANINE_PRETRAINED_MODEL_ARCHIVE_LIST, CanineForMultipleChoice, CanineForQuestionAnswering, CanineForSequenceClassification, CanineForTokenClassification, CanineLayer, CanineModel, CaninePreTrainedModel, load_tf_weights_in_canine, ) else: import sys __lowerCAmelCase : int = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
76
0
import logging import sys from dataclasses import dataclass, field from typing import Any, Dict, List, Optional, Union import librosa import torch from datasets import DatasetDict, load_dataset from packaging import version from torch import nn from transformers import ( HfArgumentParser, Trainer, TrainingArguments, WavaVecaConfig, WavaVecaFeatureExtractor, WavaVecaForPreTraining, is_apex_available, trainer_utils, ) from transformers.models.wavaveca.modeling_wavaveca import _compute_mask_indices if is_apex_available(): from apex import amp if version.parse(version.parse(torch.__version__).base_version) >= version.parse('1.6'): __lowerCAmelCase : Optional[Any] = True from torch.cuda.amp import autocast __lowerCAmelCase : Optional[int] = logging.getLogger(__name__) @dataclass class UpperCAmelCase_ : '''simple docstring''' a__ = field( metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} ) a__ = field( default=snake_case__ , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , ) a__ = field( default=snake_case__ , metadata={"""help""": """Whether to freeze the feature extractor layers of the model."""} ) a__ = field( default=snake_case__ , metadata={"""help""": """Whether to log verbose messages or not."""} , ) a__ = field( default=2.0 , metadata={"""help""": """Maximum temperature for gumbel softmax."""} ) a__ = field( default=0.5 , metadata={"""help""": """Minimum temperature for gumbel softmax."""} ) a__ = field( default=0.99_99_95 , metadata={"""help""": """Decay of gumbel temperature during training."""} ) def a__ ( A_, A_ ): '''simple docstring''' logging.basicConfig( format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", handlers=[logging.StreamHandler(sys.stdout )], ) __magic_name__ = logging.WARNING if model_args.verbose_logging: __magic_name__ = logging.DEBUG elif trainer_utils.is_main_process(training_args.local_rank ): __magic_name__ = logging.INFO logger.setLevel(__SCREAMING_SNAKE_CASE ) @dataclass class UpperCAmelCase_ : '''simple docstring''' a__ = field( default=snake_case__ , metadata={"""help""": """The name of the dataset to use (via the datasets library)."""} ) a__ = field( default=snake_case__ , metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""} ) a__ = field( default="""train""" , metadata={ """help""": """The name of the training data set split to use (via the datasets library). Defaults to 'train'""" } , ) a__ = field( default="""validation""" , metadata={ """help""": ( """The name of the validation data set split to use (via the datasets library). Defaults to 'validation'""" ) } , ) a__ = field( default="""file""" , metadata={"""help""": """Column in the dataset that contains speech file path. Defaults to 'file'"""} , ) a__ = field( default=snake_case__ , metadata={"""help""": """Overwrite the cached preprocessed datasets or not."""} ) a__ = field( default=1 , metadata={ """help""": """The percentage of the train set used as validation set in case there's no validation split""" } , ) a__ = field( default=snake_case__ , metadata={"""help""": """The number of processes to use for the preprocessing."""} , ) a__ = field( default=20.0 , metadata={"""help""": """Filter audio files that are longer than `max_duration_in_seconds` seconds"""} ) @dataclass class UpperCAmelCase_ : '''simple docstring''' a__ = 42 a__ = 42 a__ = """longest""" a__ = None a__ = None def __call__( self : Dict , UpperCamelCase__ : Optional[int] ) -> Tuple: """simple docstring""" __magic_name__ = self.feature_extractor.pad( lowercase_ , max_length=self.max_length , padding=self.padding , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="""pt""" , ) __magic_name__ = self.model._get_feat_extract_output_lengths(batch["""input_values"""].shape[-1] ) __magic_name__ = batch["input_values"].shape[0] # make sure that no loss is computed on padded inputs if batch["attention_mask"] is not None: # compute real output lengths according to convolution formula __magic_name__ = self.model._get_feat_extract_output_lengths(batch["""attention_mask"""].sum(-1 ) ).to( torch.long ) __magic_name__ = torch.zeros( (batch_size, mask_indices_seq_length) , dtype=torch.long , device=batch["""input_values"""].device ) # these two operations makes sure that all values # before the output lengths indices are attended to __magic_name__ = 1 __magic_name__ = attention_mask.flip([-1] ).cumsum(-1 ).flip([-1] ).bool() # sample randomly masked indices __magic_name__ = _compute_mask_indices( (batch_size, mask_indices_seq_length) , self.model.config.mask_time_prob , self.model.config.mask_time_length , attention_mask=lowercase_ , min_masks=2 , ) return batch class UpperCAmelCase_ ( snake_case__ ): '''simple docstring''' def __init__( self : Optional[int] , *UpperCamelCase__ : List[Any] , UpperCamelCase__ : Optional[Any]=1 , UpperCamelCase__ : Union[str, Any]=0 , UpperCamelCase__ : Union[str, Any]=1.0 , **UpperCamelCase__ : List[str] ) -> int: """simple docstring""" super().__init__(*lowercase_ , **lowercase_ ) __magic_name__ = 0 __magic_name__ = max_gumbel_temp __magic_name__ = min_gumbel_temp __magic_name__ = gumbel_temp_decay def _lowercase ( self : str , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : List[str] ) -> str: """simple docstring""" model.train() __magic_name__ = self._prepare_inputs(lowercase_ ) if self.use_amp: with autocast(): __magic_name__ = self.compute_loss(lowercase_ , lowercase_ ) else: __magic_name__ = self.compute_loss(lowercase_ , lowercase_ ) if self.args.n_gpu > 1 or self.deepspeed: if model.module.config.ctc_loss_reduction == "mean": __magic_name__ = loss.mean() elif model.module.config.ctc_loss_reduction == "sum": __magic_name__ = loss.sum() / (inputs["mask_time_indices"]).sum() else: raise ValueError(F'''{model.config.ctc_loss_reduction} is not valid. Choose one of [\'mean\', \'sum\']''' ) if self.args.gradient_accumulation_steps > 1: __magic_name__ = loss / self.args.gradient_accumulation_steps if self.use_amp: self.scaler.scale(lowercase_ ).backward() elif self.use_apex: with amp.scale_loss(lowercase_ , self.optimizer ) as scaled_loss: scaled_loss.backward() elif self.deepspeed: self.deepspeed.backward(lowercase_ ) else: loss.backward() self.num_update_step += 1 # make sure gumbel softmax temperature is decayed if self.args.n_gpu > 1 or self.deepspeed: model.module.set_gumbel_temperature( max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step , self.min_gumbel_temp ) ) else: model.set_gumbel_temperature( max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step , self.min_gumbel_temp ) ) return loss.detach() def a__ ( ): '''simple docstring''' __magic_name__ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) __magic_name__ = parser.parse_args_into_dataclasses() configure_logger(__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE ) # Downloading and loading a dataset from the hub. __magic_name__ = load_dataset(data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir ) if "validation" not in datasets.keys(): # make sure only "validation" and "train" keys remain" __magic_name__ = DatasetDict() __magic_name__ = load_dataset( data_args.dataset_name, data_args.dataset_config_name, split=f'''{data_args.train_split_name}[:{data_args.validation_split_percentage}%]''', cache_dir=model_args.cache_dir, ) __magic_name__ = load_dataset( data_args.dataset_name, data_args.dataset_config_name, split=f'''{data_args.train_split_name}[{data_args.validation_split_percentage}%:]''', cache_dir=model_args.cache_dir, ) else: # make sure only "validation" and "train" keys remain" __magic_name__ = DatasetDict() __magic_name__ = load_dataset( data_args.dataset_name, data_args.dataset_config_name, split="""validation""", cache_dir=model_args.cache_dir, ) __magic_name__ = load_dataset( data_args.dataset_name, data_args.dataset_config_name, split=f'''{data_args.train_split_name}''', cache_dir=model_args.cache_dir, ) # only normalized-inputs-training is supported __magic_name__ = WavaVecaFeatureExtractor.from_pretrained( model_args.model_name_or_path, cache_dir=model_args.cache_dir, do_normalize=__SCREAMING_SNAKE_CASE ) def prepare_dataset(A_ ): # check that all files have the correct sampling rate __magic_name__ = librosa.load(batch[data_args.speech_file_column], sr=feature_extractor.sampling_rate ) return batch # load audio files into numpy arrays __magic_name__ = datasets.map( __SCREAMING_SNAKE_CASE, num_proc=data_args.preprocessing_num_workers, remove_columns=datasets["""train"""].column_names ) # filter audio files that are too long __magic_name__ = vectorized_datasets.filter( lambda A_ : len(data["""speech"""] ) < int(data_args.max_duration_in_seconds * feature_extractor.sampling_rate ) ) def normalize(A_ ): return feature_extractor(batch["""speech"""], sampling_rate=feature_extractor.sampling_rate ) # normalize and transform to `BatchFeatures` __magic_name__ = vectorized_datasets.map( __SCREAMING_SNAKE_CASE, batched=__SCREAMING_SNAKE_CASE, num_proc=data_args.preprocessing_num_workers, load_from_cache_file=not data_args.overwrite_cache, remove_columns=vectorized_datasets["""train"""].column_names, ) # pretraining is only supported for "newer" stable layer norm architecture # apply_spec_augment has to be True, mask_feature_prob has to be 0.0 __magic_name__ = WavaVecaConfig.from_pretrained( model_args.model_name_or_path, cache_dir=model_args.cache_dir, gradient_checkpointing=training_args.gradient_checkpointing, ) if not config.do_stable_layer_norm or config.feat_extract_norm != "layer": raise ValueError( """PreTraining is only supported for ``config.do_stable_layer_norm=True`` and""" """ ``config.feat_extract_norm='layer'""" ) __magic_name__ = WavaVecaForPreTraining(__SCREAMING_SNAKE_CASE ) __magic_name__ = DataCollatorForWavaVecaPretraining(model=__SCREAMING_SNAKE_CASE, feature_extractor=__SCREAMING_SNAKE_CASE ) __magic_name__ = WavaVecaPreTrainer( model=__SCREAMING_SNAKE_CASE, data_collator=__SCREAMING_SNAKE_CASE, args=__SCREAMING_SNAKE_CASE, train_dataset=vectorized_datasets["""train"""], eval_dataset=vectorized_datasets["""validation"""], tokenizer=__SCREAMING_SNAKE_CASE, max_gumbel_temp=model_args.max_gumbel_temperature, min_gumbel_temp=model_args.min_gumbel_temperature, gumbel_temp_decay=model_args.gumbel_temperature_decay, ) trainer.train() if __name__ == "__main__": main()
703
import argparse import torch from transformers import ( WavaVecaConfig, WavaVecaFeatureExtractor, WavaVecaForAudioFrameClassification, WavaVecaForSequenceClassification, WavaVecaForXVector, logging, ) logging.set_verbosity_info() __lowerCAmelCase : str = logging.get_logger(__name__) def a__ ( A_, A_, A_ ): '''simple docstring''' __magic_name__ = WavaVecaForSequenceClassification.from_pretrained(A_, config=A_ ) __magic_name__ = downstream_dict["""projector.weight"""] __magic_name__ = downstream_dict["""projector.bias"""] __magic_name__ = downstream_dict["""model.post_net.linear.weight"""] __magic_name__ = downstream_dict["""model.post_net.linear.bias"""] return model def a__ ( A_, A_, A_ ): '''simple docstring''' __magic_name__ = WavaVecaForAudioFrameClassification.from_pretrained(A_, config=A_ ) __magic_name__ = downstream_dict["""model.linear.weight"""] __magic_name__ = downstream_dict["""model.linear.bias"""] return model def a__ ( A_, A_, A_ ): '''simple docstring''' __magic_name__ = WavaVecaForXVector.from_pretrained(A_, config=A_ ) __magic_name__ = downstream_dict["""connector.weight"""] __magic_name__ = downstream_dict["""connector.bias"""] for i, kernel_size in enumerate(hf_config.tdnn_kernel ): __magic_name__ = downstream_dict[ f'''model.framelevel_feature_extractor.module.{i}.kernel.weight''' ] __magic_name__ = downstream_dict[f'''model.framelevel_feature_extractor.module.{i}.kernel.bias'''] __magic_name__ = downstream_dict["""model.utterancelevel_feature_extractor.linear1.weight"""] __magic_name__ = downstream_dict["""model.utterancelevel_feature_extractor.linear1.bias"""] __magic_name__ = downstream_dict["""model.utterancelevel_feature_extractor.linear2.weight"""] __magic_name__ = downstream_dict["""model.utterancelevel_feature_extractor.linear2.bias"""] __magic_name__ = downstream_dict["""objective.W"""] return model @torch.no_grad() def a__ ( A_, A_, A_, A_ ): '''simple docstring''' __magic_name__ = torch.load(A_, map_location="""cpu""" ) __magic_name__ = checkpoint["""Downstream"""] __magic_name__ = WavaVecaConfig.from_pretrained(A_ ) __magic_name__ = WavaVecaFeatureExtractor.from_pretrained( A_, return_attention_mask=A_, do_normalize=A_ ) __magic_name__ = hf_config.architectures[0] if arch.endswith("""ForSequenceClassification""" ): __magic_name__ = convert_classification(A_, A_, A_ ) elif arch.endswith("""ForAudioFrameClassification""" ): __magic_name__ = convert_diarization(A_, A_, A_ ) elif arch.endswith("""ForXVector""" ): __magic_name__ = convert_xvector(A_, A_, A_ ) else: raise NotImplementedError(f'''S3PRL weights conversion is not supported for {arch}''' ) if hf_config.use_weighted_layer_sum: __magic_name__ = checkpoint["""Featurizer"""]["""weights"""] hf_feature_extractor.save_pretrained(A_ ) hf_model.save_pretrained(A_ ) if __name__ == "__main__": __lowerCAmelCase : Optional[Any] = argparse.ArgumentParser() parser.add_argument( '--base_model_name', default=None, type=str, help='Name of the huggingface pretrained base model.' ) parser.add_argument('--config_path', default=None, type=str, help='Path to the huggingface classifier config.') parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to the s3prl checkpoint.') parser.add_argument('--model_dump_path', default=None, type=str, help='Path to the final converted model.') __lowerCAmelCase : str = parser.parse_args() convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
76
0
import argparse import json import os from pathlib import Path import requests import torch from transformers import JukeboxConfig, JukeboxModel from transformers.utils import logging logging.set_verbosity_info() __lowerCAmelCase : int = logging.get_logger(__name__) __lowerCAmelCase : List[Any] = "https://openaipublic.azureedge.net/jukebox/models/" __lowerCAmelCase : Optional[Any] = { "jukebox-1b-lyrics": [ "5b/vqvae.pth.tar", "5b/prior_level_0.pth.tar", "5b/prior_level_1.pth.tar", "1b_lyrics/prior_level_2.pth.tar", ], "jukebox-5b-lyrics": [ "5b/vqvae.pth.tar", "5b/prior_level_0.pth.tar", "5b/prior_level_1.pth.tar", "5b_lyrics/prior_level_2.pth.tar", ], } def a__ ( A_ ): '''simple docstring''' if key.endswith(""".model.1.bias""" ) and len(key.split(""".""" ) ) > 10: __magic_name__ = key.replace(""".model.1.bias""", """.conv1d_1.bias""" ) elif key.endswith(""".model.1.weight""" ) and len(key.split(""".""" ) ) > 10: __magic_name__ = key.replace(""".model.1.weight""", """.conv1d_1.weight""" ) elif key.endswith(""".model.3.bias""" ) and len(key.split(""".""" ) ) > 10: __magic_name__ = key.replace(""".model.3.bias""", """.conv1d_2.bias""" ) elif key.endswith(""".model.3.weight""" ) and len(key.split(""".""" ) ) > 10: __magic_name__ = key.replace(""".model.3.weight""", """.conv1d_2.weight""" ) if "conditioner_blocks.0." in key: __magic_name__ = key.replace("""conditioner_blocks.0""", """conditioner_blocks""" ) if "prime_prior" in key: __magic_name__ = key.replace("""prime_prior""", """encoder""" ) if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key: __magic_name__ = key.replace(""".emb.""", """.""" ) if key.endswith("""k""" ): # replace vqvae.X.k with vqvae.X.codebook return key.replace(""".k""", """.codebook""" ) if "y_emb." in key: return key.replace("""y_emb.""", """metadata_embedding.""" ) if "x_emb.emb." in key: __magic_name__ = key.replace("""0.x_emb.emb""", """embed_tokens""" ) if "prime_state_ln" in key: return key.replace("""prime_state_ln""", """encoder.final_layer_norm""" ) if ".ln" in key: return key.replace(""".ln""", """.layer_norm""" ) if "_ln" in key: return key.replace("""_ln""", """_layer_norm""" ) if "prime_state_proj" in key: return key.replace("""prime_state_proj""", """encoder.proj_in""" ) if "prime_x_out" in key: return key.replace("""prime_x_out""", """encoder.lm_head""" ) if "prior.x_out" in key: return key.replace("""x_out""", """fc_proj_out""" ) if "x_emb" in key: return key.replace("""x_emb""", """embed_tokens""" ) return key def a__ ( A_, A_, A_, A_ ): '''simple docstring''' __magic_name__ = {} import re __magic_name__ = re.compile(R"""encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)""" ) __magic_name__ = re.compile( R"""encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)""" ) __magic_name__ = re.compile(R"""encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)""" ) __magic_name__ = re.compile(R"""decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)""" ) __magic_name__ = re.compile( R"""decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)""" ) __magic_name__ = re.compile(R"""decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)""" ) __magic_name__ = re.compile(R"""conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)""" ) __magic_name__ = re.compile( R"""conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)""" ) __magic_name__ = re.compile(R"""conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)""" ) for original_key, value in state_dict.items(): # rename vqvae.encoder keys if re_encoder_block_conv_in.fullmatch(A_ ): __magic_name__ = re_encoder_block_conv_in.match(A_ ) __magic_name__ = regex_match.groups() __magic_name__ = int(groups[2] ) * 2 + int(groups[3] ) __magic_name__ = f'''encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}''' __magic_name__ = re_encoder_block_conv_in.sub(A_, A_ ) elif re_encoder_block_resnet.fullmatch(A_ ): __magic_name__ = re_encoder_block_resnet.match(A_ ) __magic_name__ = regex_match.groups() __magic_name__ = int(groups[2] ) * 2 + int(groups[3] ) __magic_name__ = {"""1""": 1, """3""": 2}[groups[-2]] __magic_name__ = f'''encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.''' __magic_name__ = f'''resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}''' __magic_name__ = prefix + resnet_block __magic_name__ = re_encoder_block_resnet.sub(A_, A_ ) elif re_encoder_block_proj_out.fullmatch(A_ ): __magic_name__ = re_encoder_block_proj_out.match(A_ ) __magic_name__ = regex_match.groups() __magic_name__ = f'''encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}''' __magic_name__ = re_encoder_block_proj_out.sub(A_, A_ ) # rename vqvae.decoder keys elif re_decoder_block_conv_out.fullmatch(A_ ): __magic_name__ = re_decoder_block_conv_out.match(A_ ) __magic_name__ = regex_match.groups() __magic_name__ = int(groups[2] ) * 2 + int(groups[3] ) - 2 __magic_name__ = f'''decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}''' __magic_name__ = re_decoder_block_conv_out.sub(A_, A_ ) elif re_decoder_block_resnet.fullmatch(A_ ): __magic_name__ = re_decoder_block_resnet.match(A_ ) __magic_name__ = regex_match.groups() __magic_name__ = int(groups[2] ) * 2 + int(groups[3] ) - 2 __magic_name__ = {"""1""": 1, """3""": 2}[groups[-2]] __magic_name__ = f'''decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.''' __magic_name__ = f'''resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}''' __magic_name__ = prefix + resnet_block __magic_name__ = re_decoder_block_resnet.sub(A_, A_ ) elif re_decoder_block_proj_in.fullmatch(A_ ): __magic_name__ = re_decoder_block_proj_in.match(A_ ) __magic_name__ = regex_match.groups() __magic_name__ = f'''decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}''' __magic_name__ = re_decoder_block_proj_in.sub(A_, A_ ) # rename prior cond.model to upsampler.upsample_block and resnet elif re_prior_cond_conv_out.fullmatch(A_ ): __magic_name__ = re_prior_cond_conv_out.match(A_ ) __magic_name__ = regex_match.groups() __magic_name__ = int(groups[1] ) * 2 + int(groups[2] ) - 2 __magic_name__ = f'''conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}''' __magic_name__ = re_prior_cond_conv_out.sub(A_, A_ ) elif re_prior_cond_resnet.fullmatch(A_ ): __magic_name__ = re_prior_cond_resnet.match(A_ ) __magic_name__ = regex_match.groups() __magic_name__ = int(groups[1] ) * 2 + int(groups[2] ) - 2 __magic_name__ = {"""1""": 1, """3""": 2}[groups[-2]] __magic_name__ = f'''conditioner_blocks.upsampler.upsample_block.{block_index}.''' __magic_name__ = f'''resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}''' __magic_name__ = prefix + resnet_block __magic_name__ = re_prior_cond_resnet.sub(A_, A_ ) elif re_prior_cond_proj_in.fullmatch(A_ ): __magic_name__ = re_prior_cond_proj_in.match(A_ ) __magic_name__ = regex_match.groups() __magic_name__ = f'''conditioner_blocks.upsampler.proj_in.{groups[-1]}''' __magic_name__ = re_prior_cond_proj_in.sub(A_, A_ ) # keep original key else: __magic_name__ = original_key __magic_name__ = replace_key(A_ ) if f'''{key_prefix}.{key}''' not in model_state_dict or key is None: print(f'''failed converting {original_key} to {key}, does not match''' ) # handle missmatched shape elif value.shape != model_state_dict[f'''{key_prefix}.{key}'''].shape: __magic_name__ = model_state_dict[f'''{key_prefix}.{key}'''] print(f'''{original_key}-> {key} : \nshape {val.shape} and { value.shape}, do not match''' ) __magic_name__ = original_key __magic_name__ = original_key __magic_name__ = value return new_dict @torch.no_grad() def a__ ( A_=None, A_=None ): '''simple docstring''' for file in MODEL_MAPPING[model_name]: if not os.path.isfile(f'''{pytorch_dump_folder_path}/{file.split('/' )[-1]}''' ): __magic_name__ = requests.get(f'''{PREFIX}{file}''', allow_redirects=A_ ) os.makedirs(f'''{pytorch_dump_folder_path}/''', exist_ok=A_ ) open(f'''{pytorch_dump_folder_path}/{file.split('/' )[-1]}''', """wb""" ).write(r.content ) __magic_name__ = MODEL_MAPPING[model_name.split("""/""" )[-1]] __magic_name__ = JukeboxConfig.from_pretrained(A_ ) __magic_name__ = JukeboxModel(A_ ) __magic_name__ = [] __magic_name__ = {} for i, dict_name in enumerate(A_ ): __magic_name__ = torch.load(f'''{pytorch_dump_folder_path}/{dict_name.split('/' )[-1]}''' )["""model"""] __magic_name__ = {} for k in old_dic.keys(): if k.endswith(""".b""" ): __magic_name__ = old_dic[k] elif k.endswith(""".w""" ): __magic_name__ = old_dic[k] elif "level_2" not in dict_name and "cond.model." in k: __magic_name__ = old_dic[k] else: __magic_name__ = old_dic[k] __magic_name__ = """vqvae""" if i == 0 else f'''priors.{3 - i}''' __magic_name__ = fix_jukebox_keys(A_, model.state_dict(), A_, A_ ) weight_dict.append(A_ ) __magic_name__ = weight_dict.pop(0 ) model.vqvae.load_state_dict(A_ ) for i in range(len(A_ ) ): model.priors[i].load_state_dict(weight_dict[2 - i] ) Path(A_ ).mkdir(exist_ok=A_ ) with open(f'''{pytorch_dump_folder_path}/mapping.json''', """w""" ) as txtfile: json.dump(A_, A_ ) print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(A_ ) return weight_dict if __name__ == "__main__": __lowerCAmelCase : str = argparse.ArgumentParser() # Required parameters parser.add_argument( '--model_name', default='jukebox-5b-lyrics', type=str, help='Name of the model you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default='jukebox-5b-lyrics-converted', type=str, help='Path to the output PyTorch model directory.', ) __lowerCAmelCase : Dict = parser.parse_args() convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
704
import pytest from datasets import Dataset, DatasetDict, Features, NamedSplit, Value from datasets.io.text import TextDatasetReader from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases def a__ ( A_, A_ ): '''simple docstring''' assert isinstance(A_, A_ ) assert dataset.num_rows == 4 assert dataset.num_columns == 1 assert dataset.column_names == ["text"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("""keep_in_memory""", [False, True] ) def a__ ( A_, A_, A_ ): '''simple docstring''' __magic_name__ = tmp_path / """cache""" __magic_name__ = {"""text""": """string"""} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): __magic_name__ = TextDatasetReader(A_, cache_dir=A_, keep_in_memory=A_ ).read() _check_text_dataset(A_, A_ ) @pytest.mark.parametrize( """features""", [ None, {"""text""": """string"""}, {"""text""": """int32"""}, {"""text""": """float32"""}, ], ) def a__ ( A_, A_, A_ ): '''simple docstring''' __magic_name__ = tmp_path / """cache""" __magic_name__ = {"""text""": """string"""} __magic_name__ = features.copy() if features else default_expected_features __magic_name__ = ( Features({feature: Value(A_ ) for feature, dtype in features.items()} ) if features is not None else None ) __magic_name__ = TextDatasetReader(A_, features=A_, cache_dir=A_ ).read() _check_text_dataset(A_, A_ ) @pytest.mark.parametrize("""split""", [None, NamedSplit("""train""" ), """train""", """test"""] ) def a__ ( A_, A_, A_ ): '''simple docstring''' __magic_name__ = tmp_path / """cache""" __magic_name__ = {"""text""": """string"""} __magic_name__ = TextDatasetReader(A_, cache_dir=A_, split=A_ ).read() _check_text_dataset(A_, A_ ) assert dataset.split == split if split else "train" @pytest.mark.parametrize("""path_type""", [str, list] ) def a__ ( A_, A_, A_ ): '''simple docstring''' if issubclass(A_, A_ ): __magic_name__ = text_path elif issubclass(A_, A_ ): __magic_name__ = [text_path] __magic_name__ = tmp_path / """cache""" __magic_name__ = {"""text""": """string"""} __magic_name__ = TextDatasetReader(A_, cache_dir=A_ ).read() _check_text_dataset(A_, A_ ) def a__ ( A_, A_, A_=("train",) ): '''simple docstring''' assert isinstance(A_, A_ ) for split in splits: __magic_name__ = dataset_dict[split] assert dataset.num_rows == 4 assert dataset.num_columns == 1 assert dataset.column_names == ["text"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("""keep_in_memory""", [False, True] ) def a__ ( A_, A_, A_ ): '''simple docstring''' __magic_name__ = tmp_path / """cache""" __magic_name__ = {"""text""": """string"""} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): __magic_name__ = TextDatasetReader({"""train""": text_path}, cache_dir=A_, keep_in_memory=A_ ).read() _check_text_datasetdict(A_, A_ ) @pytest.mark.parametrize( """features""", [ None, {"""text""": """string"""}, {"""text""": """int32"""}, {"""text""": """float32"""}, ], ) def a__ ( A_, A_, A_ ): '''simple docstring''' __magic_name__ = tmp_path / """cache""" # CSV file loses col_1 string dtype information: default now is "int64" instead of "string" __magic_name__ = {"""text""": """string"""} __magic_name__ = features.copy() if features else default_expected_features __magic_name__ = ( Features({feature: Value(A_ ) for feature, dtype in features.items()} ) if features is not None else None ) __magic_name__ = TextDatasetReader({"""train""": text_path}, features=A_, cache_dir=A_ ).read() _check_text_datasetdict(A_, A_ ) @pytest.mark.parametrize("""split""", [None, NamedSplit("""train""" ), """train""", """test"""] ) def a__ ( A_, A_, A_ ): '''simple docstring''' if split: __magic_name__ = {split: text_path} else: __magic_name__ = """train""" __magic_name__ = {"""train""": text_path, """test""": text_path} __magic_name__ = tmp_path / """cache""" __magic_name__ = {"""text""": """string"""} __magic_name__ = TextDatasetReader(A_, cache_dir=A_ ).read() _check_text_datasetdict(A_, A_, splits=list(path.keys() ) ) assert all(dataset[split].split == split for split in path.keys() )
76
0
import copy import inspect import unittest from transformers import PretrainedConfig, SwiftFormerConfig from transformers.testing_utils import ( require_torch, require_vision, slow, torch_device, ) from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import SwiftFormerForImageClassification, SwiftFormerModel from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class UpperCAmelCase_ : '''simple docstring''' def __init__( self : List[str] , UpperCamelCase__ : str , UpperCamelCase__ : Optional[Any]=13 , UpperCamelCase__ : str=3 , UpperCamelCase__ : Any=True , UpperCamelCase__ : Union[str, Any]=True , UpperCamelCase__ : Optional[Any]=0.1 , UpperCamelCase__ : List[str]=0.1 , UpperCamelCase__ : Optional[int]=224 , UpperCamelCase__ : Optional[Any]=1000 , UpperCamelCase__ : Dict=[3, 3, 6, 4] , UpperCamelCase__ : str=[48, 56, 112, 220] , ) -> List[Any]: """simple docstring""" __magic_name__ = parent __magic_name__ = batch_size __magic_name__ = num_channels __magic_name__ = is_training __magic_name__ = use_labels __magic_name__ = hidden_dropout_prob __magic_name__ = attention_probs_dropout_prob __magic_name__ = num_labels __magic_name__ = image_size __magic_name__ = layer_depths __magic_name__ = embed_dims def _lowercase ( self : Union[str, Any] ) -> str: """simple docstring""" __magic_name__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __magic_name__ = None if self.use_labels: __magic_name__ = ids_tensor([self.batch_size] , self.num_labels ) __magic_name__ = self.get_config() return config, pixel_values, labels def _lowercase ( self : List[str] ) -> Any: """simple docstring""" return SwiftFormerConfig( depths=self.layer_depths , embed_dims=self.embed_dims , mlp_ratio=4 , downsamples=[True, True, True, True] , hidden_act="""gelu""" , num_labels=self.num_labels , down_patch_size=3 , down_stride=2 , down_pad=1 , drop_rate=0.0 , drop_path_rate=0.0 , use_layer_scale=UpperCamelCase__ , layer_scale_init_value=1E-5 , ) def _lowercase ( self : int , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : int , UpperCamelCase__ : Optional[Any] ) -> Optional[int]: """simple docstring""" __magic_name__ = SwiftFormerModel(config=UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() __magic_name__ = model(UpperCamelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dims[-1], 7, 7) ) def _lowercase ( self : Optional[Any] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : Optional[Any] ) -> List[str]: """simple docstring""" __magic_name__ = self.num_labels __magic_name__ = SwiftFormerForImageClassification(UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() __magic_name__ = model(UpperCamelCase__ , labels=UpperCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) __magic_name__ = SwiftFormerForImageClassification(UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() __magic_name__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __magic_name__ = model(UpperCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def _lowercase ( self : Dict ) -> Any: """simple docstring""" (__magic_name__) = self.prepare_config_and_inputs() __magic_name__ = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class UpperCAmelCase_ ( __lowercase , __lowercase , unittest.TestCase ): '''simple docstring''' a__ = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else () a__ = ( {'feature-extraction': SwiftFormerModel, 'image-classification': SwiftFormerForImageClassification} if is_torch_available() else {} ) a__ = False a__ = False a__ = False a__ = False a__ = False def _lowercase ( self : List[Any] ) -> int: """simple docstring""" __magic_name__ = SwiftFormerModelTester(self ) __magic_name__ = ConfigTester( self , config_class=UpperCamelCase__ , has_text_modality=UpperCamelCase__ , hidden_size=37 , num_attention_heads=12 , num_hidden_layers=12 , ) def _lowercase ( self : Dict ) -> Optional[int]: """simple docstring""" self.config_tester.run_common_tests() @unittest.skip(reason="""SwiftFormer does not use inputs_embeds""" ) def _lowercase ( self : Optional[int] ) -> int: """simple docstring""" pass def _lowercase ( self : int ) -> Optional[Any]: """simple docstring""" __magic_name__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __magic_name__ = model_class(UpperCamelCase__ ) __magic_name__ = model.get_output_embeddings() self.assertTrue(x is None or isinstance(UpperCamelCase__ , nn.Linear ) ) def _lowercase ( self : Dict ) -> Dict: """simple docstring""" __magic_name__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __magic_name__ = model_class(UpperCamelCase__ ) __magic_name__ = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __magic_name__ = [*signature.parameters.keys()] __magic_name__ = ["pixel_values"] self.assertListEqual(arg_names[:1] , UpperCamelCase__ ) def _lowercase ( self : Any ) -> int: """simple docstring""" __magic_name__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCamelCase__ ) def _lowercase ( self : Dict ) -> Any: """simple docstring""" __magic_name__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*UpperCamelCase__ ) @slow def _lowercase ( self : Any ) -> int: """simple docstring""" for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __magic_name__ = SwiftFormerModel.from_pretrained(UpperCamelCase__ ) self.assertIsNotNone(UpperCamelCase__ ) @unittest.skip(reason="""SwiftFormer does not output attentions""" ) def _lowercase ( self : Dict ) -> Any: """simple docstring""" pass def _lowercase ( self : str ) -> int: """simple docstring""" def check_hidden_states_output(UpperCamelCase__ : Tuple , UpperCamelCase__ : Any , UpperCamelCase__ : Dict ): __magic_name__ = model_class(UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() with torch.no_grad(): __magic_name__ = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) ) __magic_name__ = outputs.hidden_states __magic_name__ = 8 self.assertEqual(len(UpperCamelCase__ ) , UpperCamelCase__ ) # TODO # SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width) # with the width and height being successively divided by 2, after every 2 blocks for i in range(len(UpperCamelCase__ ) ): self.assertEqual( hidden_states[i].shape , torch.Size( [ self.model_tester.batch_size, self.model_tester.embed_dims[i // 2], (self.model_tester.image_size // 4) // 2 ** (i // 2), (self.model_tester.image_size // 4) // 2 ** (i // 2), ] ) , ) __magic_name__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __magic_name__ = True check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __magic_name__ = True check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) def _lowercase ( self : Union[str, Any] ) -> Optional[int]: """simple docstring""" def _config_zero_init(UpperCamelCase__ : str ): __magic_name__ = copy.deepcopy(UpperCamelCase__ ) for key in configs_no_init.__dict__.keys(): if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key: setattr(UpperCamelCase__ , UpperCamelCase__ , 1E-10 ) if isinstance(getattr(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) , UpperCamelCase__ ): __magic_name__ = _config_zero_init(getattr(UpperCamelCase__ , UpperCamelCase__ ) ) setattr(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) return configs_no_init __magic_name__ = self.model_tester.prepare_config_and_inputs_for_common() __magic_name__ = _config_zero_init(UpperCamelCase__ ) for model_class in self.all_model_classes: __magic_name__ = model_class(config=UpperCamelCase__ ) for name, param in model.named_parameters(): if param.requires_grad: self.assertIn( ((param.data.mean() * 1E9) / 1E9).round().item() , [0.0, 1.0] , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , ) @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" ) def _lowercase ( self : List[Any] ) -> int: """simple docstring""" pass def a__ ( ): '''simple docstring''' __magic_name__ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_torch @require_vision class UpperCAmelCase_ ( unittest.TestCase ): '''simple docstring''' @cached_property def _lowercase ( self : List[Any] ) -> Tuple: """simple docstring""" return ViTImageProcessor.from_pretrained("""MBZUAI/swiftformer-xs""" ) if is_vision_available() else None @slow def _lowercase ( self : List[Any] ) -> Tuple: """simple docstring""" __magic_name__ = SwiftFormerForImageClassification.from_pretrained("""MBZUAI/swiftformer-xs""" ).to(UpperCamelCase__ ) __magic_name__ = self.default_image_processor __magic_name__ = prepare_img() __magic_name__ = image_processor(images=UpperCamelCase__ , return_tensors="""pt""" ).to(UpperCamelCase__ ) # forward pass with torch.no_grad(): __magic_name__ = model(**UpperCamelCase__ ) # verify the logits __magic_name__ = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape , UpperCamelCase__ ) __magic_name__ = torch.tensor([[-2.1703E00, 2.1107E00, -2.0811E00]] ).to(UpperCamelCase__ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCamelCase__ , atol=1E-4 ) )
705
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, logging __lowerCAmelCase : Union[str, Any] = logging.get_logger(__name__) class UpperCAmelCase_ ( _A ): '''simple docstring''' a__ = ["""pixel_values"""] def __init__( self : str , UpperCamelCase__ : bool = True , UpperCamelCase__ : Optional[Dict[str, int]] = None , UpperCamelCase__ : PILImageResampling = PILImageResampling.BILINEAR , UpperCamelCase__ : bool = True , UpperCamelCase__ : Dict[str, int] = None , UpperCamelCase__ : bool = True , UpperCamelCase__ : Union[int, float] = 1 / 255 , UpperCamelCase__ : bool = True , UpperCamelCase__ : Optional[Union[float, List[float]]] = None , UpperCamelCase__ : Optional[Union[float, List[float]]] = None , **UpperCamelCase__ : List[Any] , ) -> None: """simple docstring""" super().__init__(**UpperCamelCase__ ) __magic_name__ = size if size is not None else {"""shortest_edge""": 256} __magic_name__ = get_size_dict(UpperCamelCase__ , default_to_square=UpperCamelCase__ ) __magic_name__ = crop_size if crop_size is not None else {"""height""": 224, """width""": 224} __magic_name__ = get_size_dict(UpperCamelCase__ ) __magic_name__ = do_resize __magic_name__ = size __magic_name__ = resample __magic_name__ = do_center_crop __magic_name__ = crop_size __magic_name__ = do_rescale __magic_name__ = rescale_factor __magic_name__ = do_normalize __magic_name__ = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN __magic_name__ = image_std if image_std is not None else IMAGENET_STANDARD_STD def _lowercase ( self : Any , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : Dict[str, int] , UpperCamelCase__ : PILImageResampling = PILImageResampling.BICUBIC , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : Tuple , ) -> np.ndarray: """simple docstring""" __magic_name__ = get_size_dict(UpperCamelCase__ , default_to_square=UpperCamelCase__ ) if "shortest_edge" not in size: raise ValueError(F'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''' ) __magic_name__ = get_resize_output_image_size(UpperCamelCase__ , size=size["""shortest_edge"""] , default_to_square=UpperCamelCase__ ) return resize(UpperCamelCase__ , size=UpperCamelCase__ , resample=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__ ) def _lowercase ( self : str , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : Dict[str, int] , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : str , ) -> np.ndarray: """simple docstring""" __magic_name__ = get_size_dict(UpperCamelCase__ ) return center_crop(UpperCamelCase__ , size=(size["""height"""], size["""width"""]) , data_format=UpperCamelCase__ , **UpperCamelCase__ ) def _lowercase ( self : Tuple , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : float , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : Any ) -> np.ndarray: """simple docstring""" return rescale(UpperCamelCase__ , scale=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__ ) def _lowercase ( self : List[str] , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : Union[float, List[float]] , UpperCamelCase__ : Union[float, List[float]] , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : str , ) -> np.ndarray: """simple docstring""" return normalize(UpperCamelCase__ , mean=UpperCamelCase__ , std=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__ ) def _lowercase ( self : Optional[Any] , UpperCamelCase__ : ImageInput , UpperCamelCase__ : Optional[bool] = None , UpperCamelCase__ : Dict[str, int] = None , UpperCamelCase__ : PILImageResampling = None , UpperCamelCase__ : bool = None , UpperCamelCase__ : Dict[str, int] = None , UpperCamelCase__ : Optional[bool] = None , UpperCamelCase__ : Optional[float] = None , UpperCamelCase__ : Optional[bool] = None , UpperCamelCase__ : Optional[Union[float, List[float]]] = None , UpperCamelCase__ : Optional[Union[float, List[float]]] = None , UpperCamelCase__ : Optional[Union[str, TensorType]] = None , UpperCamelCase__ : Union[str, ChannelDimension] = ChannelDimension.FIRST , **UpperCamelCase__ : int , ) -> Dict: """simple docstring""" __magic_name__ = do_resize if do_resize is not None else self.do_resize __magic_name__ = size if size is not None else self.size __magic_name__ = get_size_dict(UpperCamelCase__ , default_to_square=UpperCamelCase__ ) __magic_name__ = resample if resample is not None else self.resample __magic_name__ = do_center_crop if do_center_crop is not None else self.do_center_crop __magic_name__ = crop_size if crop_size is not None else self.crop_size __magic_name__ = get_size_dict(UpperCamelCase__ ) __magic_name__ = do_rescale if do_rescale is not None else self.do_rescale __magic_name__ = rescale_factor if rescale_factor is not None else self.rescale_factor __magic_name__ = do_normalize if do_normalize is not None else self.do_normalize __magic_name__ = image_mean if image_mean is not None else self.image_mean __magic_name__ = image_std if image_std is not None else self.image_std __magic_name__ = make_list_of_images(UpperCamelCase__ ) if not valid_images(UpperCamelCase__ ): raise ValueError( """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """ """torch.Tensor, tf.Tensor or jax.ndarray.""" ) if do_resize and size is None: raise ValueError("""Size must be specified if do_resize is True.""" ) if do_center_crop and crop_size is None: raise ValueError("""Crop size must be specified if do_center_crop is True.""" ) if do_rescale and rescale_factor is None: raise ValueError("""Rescale factor must be specified if do_rescale is True.""" ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("""Image mean and std must be specified if do_normalize is True.""" ) # All transformations expect numpy arrays. __magic_name__ = [to_numpy_array(UpperCamelCase__ ) for image in images] if do_resize: __magic_name__ = [self.resize(image=UpperCamelCase__ , size=UpperCamelCase__ , resample=UpperCamelCase__ ) for image in images] if do_center_crop: __magic_name__ = [self.center_crop(image=UpperCamelCase__ , size=UpperCamelCase__ ) for image in images] if do_rescale: __magic_name__ = [self.rescale(image=UpperCamelCase__ , scale=UpperCamelCase__ ) for image in images] if do_normalize: __magic_name__ = [self.normalize(image=UpperCamelCase__ , mean=UpperCamelCase__ , std=UpperCamelCase__ ) for image in images] __magic_name__ = [to_channel_dimension_format(UpperCamelCase__ , UpperCamelCase__ ) for image in images] __magic_name__ = {"""pixel_values""": images} return BatchFeature(data=UpperCamelCase__ , tensor_type=UpperCamelCase__ )
76
0
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available __lowerCAmelCase : str = { "configuration_xlm": ["XLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLMConfig", "XLMOnnxConfig"], "tokenization_xlm": ["XLMTokenizer"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCAmelCase : Tuple = [ "XLM_PRETRAINED_MODEL_ARCHIVE_LIST", "XLMForMultipleChoice", "XLMForQuestionAnswering", "XLMForQuestionAnsweringSimple", "XLMForSequenceClassification", "XLMForTokenClassification", "XLMModel", "XLMPreTrainedModel", "XLMWithLMHeadModel", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCAmelCase : Optional[int] = [ "TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST", "TFXLMForMultipleChoice", "TFXLMForQuestionAnsweringSimple", "TFXLMForSequenceClassification", "TFXLMForTokenClassification", "TFXLMMainLayer", "TFXLMModel", "TFXLMPreTrainedModel", "TFXLMWithLMHeadModel", ] if TYPE_CHECKING: from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig from .tokenization_xlm import XLMTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xlm import ( XLM_PRETRAINED_MODEL_ARCHIVE_LIST, XLMForMultipleChoice, XLMForQuestionAnswering, XLMForQuestionAnsweringSimple, XLMForSequenceClassification, XLMForTokenClassification, XLMModel, XLMPreTrainedModel, XLMWithLMHeadModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_xlm import ( TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFXLMForMultipleChoice, TFXLMForQuestionAnsweringSimple, TFXLMForSequenceClassification, TFXLMForTokenClassification, TFXLMMainLayer, TFXLMModel, TFXLMPreTrainedModel, TFXLMWithLMHeadModel, ) else: import sys __lowerCAmelCase : List[str] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
706
import math def a__ ( A_, A_ = 0, A_ = 0 ): '''simple docstring''' __magic_name__ = end or len(A_ ) for i in range(A_, A_ ): __magic_name__ = i __magic_name__ = array[i] while temp_index != start and temp_index_value < array[temp_index - 1]: __magic_name__ = array[temp_index - 1] temp_index -= 1 __magic_name__ = temp_index_value return array def a__ ( A_, A_, A_ ): # Max Heap '''simple docstring''' __magic_name__ = index __magic_name__ = 2 * index + 1 # Left Node __magic_name__ = 2 * index + 2 # Right Node if left_index < heap_size and array[largest] < array[left_index]: __magic_name__ = left_index if right_index < heap_size and array[largest] < array[right_index]: __magic_name__ = right_index if largest != index: __magic_name__ , __magic_name__ = array[largest], array[index] heapify(A_, A_, A_ ) def a__ ( A_ ): '''simple docstring''' __magic_name__ = len(A_ ) for i in range(n // 2, -1, -1 ): heapify(A_, A_, A_ ) for i in range(n - 1, 0, -1 ): __magic_name__ , __magic_name__ = array[0], array[i] heapify(A_, 0, A_ ) return array def a__ ( A_, A_, A_, A_ ): '''simple docstring''' if (array[first_index] > array[middle_index]) != ( array[first_index] > array[last_index] ): return array[first_index] elif (array[middle_index] > array[first_index]) != ( array[middle_index] > array[last_index] ): return array[middle_index] else: return array[last_index] def a__ ( A_, A_, A_, A_ ): '''simple docstring''' __magic_name__ = low __magic_name__ = high while True: while array[i] < pivot: i += 1 j -= 1 while pivot < array[j]: j -= 1 if i >= j: return i __magic_name__ , __magic_name__ = array[j], array[i] i += 1 def a__ ( A_ ): '''simple docstring''' if len(A_ ) == 0: return array __magic_name__ = 2 * math.ceil(math.loga(len(A_ ) ) ) __magic_name__ = 16 return intro_sort(A_, 0, len(A_ ), A_, A_ ) def a__ ( A_, A_, A_, A_, A_ ): '''simple docstring''' while end - start > size_threshold: if max_depth == 0: return heap_sort(A_ ) max_depth -= 1 __magic_name__ = median_of_a(A_, A_, start + ((end - start) // 2) + 1, end - 1 ) __magic_name__ = partition(A_, A_, A_, A_ ) intro_sort(A_, A_, A_, A_, A_ ) __magic_name__ = p return insertion_sort(A_, A_, A_ ) if __name__ == "__main__": import doctest doctest.testmod() __lowerCAmelCase : str = input('Enter numbers separated by a comma : ').strip() __lowerCAmelCase : List[Any] = [float(item) for item in user_input.split(',')] print(sort(unsorted))
76
0
from random import randint from tempfile import TemporaryFile import numpy as np def a__ ( A_, A_, A_ ): '''simple docstring''' __magic_name__ = 0 if start < end: __magic_name__ = randint(A_, A_ ) __magic_name__ = a[end] __magic_name__ = a[pivot] __magic_name__ = temp __magic_name__ = _in_place_partition(A_, A_, A_ ) count += _in_place_quick_sort(A_, A_, p - 1 ) count += _in_place_quick_sort(A_, p + 1, A_ ) return count def a__ ( A_, A_, A_ ): '''simple docstring''' __magic_name__ = 0 __magic_name__ = randint(A_, A_ ) __magic_name__ = a[end] __magic_name__ = a[pivot] __magic_name__ = temp __magic_name__ = start - 1 for index in range(A_, A_ ): count += 1 if a[index] < a[end]: # check if current val is less than pivot value __magic_name__ = new_pivot_index + 1 __magic_name__ = a[new_pivot_index] __magic_name__ = a[index] __magic_name__ = temp __magic_name__ = a[new_pivot_index + 1] __magic_name__ = a[end] __magic_name__ = temp return new_pivot_index + 1, count __lowerCAmelCase : Optional[int] = TemporaryFile() __lowerCAmelCase : Optional[Any] = 100 # 1000 elements are to be sorted __lowerCAmelCase , __lowerCAmelCase : Optional[int] = 0, 1 # mean and standard deviation __lowerCAmelCase : Any = np.random.normal(mu, sigma, p) np.save(outfile, X) print('The array is') print(X) outfile.seek(0) # using the same array __lowerCAmelCase : Dict = np.load(outfile) __lowerCAmelCase : Optional[int] = len(M) - 1 __lowerCAmelCase : Any = _in_place_quick_sort(M, 0, r) print( 'No of Comparisons for 100 elements selected from a standard normal distribution' 'is :' ) print(z)
707
import argparse import json import re from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( MobileNetVaConfig, MobileNetVaForImageClassification, MobileNetVaImageProcessor, load_tf_weights_in_mobilenet_va, ) from transformers.utils import logging logging.set_verbosity_info() __lowerCAmelCase : str = logging.get_logger(__name__) def a__ ( A_ ): '''simple docstring''' __magic_name__ = MobileNetVaConfig(layer_norm_eps=0.001 ) if "_quant" in model_name: raise ValueError("""Quantized models are not supported.""" ) __magic_name__ = re.match(R"""^mobilenet_v1_([^_]*)_([^_]*)$""", A_ ) if matches: __magic_name__ = float(matches[1] ) __magic_name__ = int(matches[2] ) # The TensorFlow version of MobileNetV1 predicts 1001 classes instead of # the usual 1000. The first class (index 0) is "background". __magic_name__ = 1001 __magic_name__ = """imagenet-1k-id2label.json""" __magic_name__ = """huggingface/label-files""" __magic_name__ = json.load(open(hf_hub_download(A_, A_, repo_type="""dataset""" ), """r""" ) ) __magic_name__ = {int(A_ ) + 1: v for k, v in idalabel.items()} __magic_name__ = """background""" __magic_name__ = idalabel __magic_name__ = {v: k for k, v in idalabel.items()} return config def a__ ( ): '''simple docstring''' __magic_name__ = """http://images.cocodataset.org/val2017/000000039769.jpg""" __magic_name__ = Image.open(requests.get(A_, stream=A_ ).raw ) return im @torch.no_grad() def a__ ( A_, A_, A_, A_=False ): '''simple docstring''' __magic_name__ = get_mobilenet_va_config(A_ ) # Load 🤗 model __magic_name__ = MobileNetVaForImageClassification(A_ ).eval() # Load weights from TensorFlow checkpoint load_tf_weights_in_mobilenet_va(A_, A_, A_ ) # Check outputs on an image, prepared by MobileNetV1ImageProcessor __magic_name__ = MobileNetVaImageProcessor( crop_size={"""width""": config.image_size, """height""": config.image_size}, size={"""shortest_edge""": config.image_size + 32}, ) __magic_name__ = image_processor(images=prepare_img(), return_tensors="""pt""" ) __magic_name__ = model(**A_ ) __magic_name__ = outputs.logits assert logits.shape == (1, 1001) if model_name == "mobilenet_v1_1.0_224": __magic_name__ = torch.tensor([-4.1739, -1.1233, 3.1205] ) elif model_name == "mobilenet_v1_0.75_192": __magic_name__ = torch.tensor([-3.9440, -2.3141, -0.3333] ) else: __magic_name__ = None if expected_logits is not None: assert torch.allclose(logits[0, :3], A_, atol=1e-4 ) Path(A_ ).mkdir(exist_ok=A_ ) print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(A_ ) print(f'''Saving image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(A_ ) if push_to_hub: print("""Pushing to the hub...""" ) __magic_name__ = """google/""" + model_name image_processor.push_to_hub(A_ ) model.push_to_hub(A_ ) if __name__ == "__main__": __lowerCAmelCase : str = argparse.ArgumentParser() # Required parameters parser.add_argument( '--model_name', default='mobilenet_v1_1.0_224', type=str, help='Name of the MobileNetV1 model you\'d like to convert. Should in the form \'mobilenet_v1_<depth>_<size>\'.', ) parser.add_argument( '--checkpoint_path', required=True, type=str, help='Path to the original TensorFlow checkpoint (.ckpt file).' ) parser.add_argument( '--pytorch_dump_folder_path', required=True, type=str, help='Path to the output PyTorch model directory.' ) parser.add_argument( '--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.' ) __lowerCAmelCase : str = parser.parse_args() convert_movilevit_checkpoint( args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub )
76
0
def a__ ( A_ = 200 ): '''simple docstring''' __magic_name__ = [1, 2, 5, 10, 20, 50, 100, 200] __magic_name__ = [0] * (pence + 1) __magic_name__ = 1 # base case: 1 way to make 0 pence for coin in coins: for i in range(lowerCAmelCase__, pence + 1, 1 ): number_of_ways[i] += number_of_ways[i - coin] return number_of_ways[pence] if __name__ == "__main__": assert solution(200) == 73682
708
import collections import importlib.util import os import re from pathlib import Path __lowerCAmelCase : int = 'src/transformers' # Matches is_xxx_available() __lowerCAmelCase : Optional[int] = re.compile(R'is\_([a-z_]*)_available()') # Catches a one-line _import_struct = {xxx} __lowerCAmelCase : Dict = re.compile(R'^_import_structure\s+=\s+\{([^\}]+)\}') # Catches a line with a key-values pattern: "bla": ["foo", "bar"] __lowerCAmelCase : int = re.compile(R'\s+"\S*":\s+\[([^\]]*)\]') # Catches a line if not is_foo_available __lowerCAmelCase : Optional[Any] = re.compile(R'^\s*if\s+not\s+is\_[a-z_]*\_available\(\)') # Catches a line _import_struct["bla"].append("foo") __lowerCAmelCase : Optional[Any] = re.compile(R'^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)') # Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"] __lowerCAmelCase : Dict = re.compile(R'^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]') # Catches a line with an object between quotes and a comma: "MyModel", __lowerCAmelCase : List[str] = re.compile('^\s+"([^"]+)",') # Catches a line with objects between brackets only: ["foo", "bar"], __lowerCAmelCase : Optional[int] = re.compile('^\s+\[([^\]]+)\]') # Catches a line with from foo import bar, bla, boo __lowerCAmelCase : List[str] = re.compile(R'\s+from\s+\S*\s+import\s+([^\(\s].*)\n') # Catches a line with try: __lowerCAmelCase : int = re.compile(R'^\s*try:') # Catches a line with else: __lowerCAmelCase : Tuple = re.compile(R'^\s*else:') def a__ ( A_ ): '''simple docstring''' if _re_test_backend.search(A_ ) is None: return None __magic_name__ = [b[0] for b in _re_backend.findall(A_ )] backends.sort() return "_and_".join(A_ ) def a__ ( A_ ): '''simple docstring''' with open(A_, """r""", encoding="""utf-8""", newline="""\n""" ) as f: __magic_name__ = f.readlines() __magic_name__ = 0 while line_index < len(A_ ) and not lines[line_index].startswith("""_import_structure = {""" ): line_index += 1 # If this is a traditional init, just return. if line_index >= len(A_ ): return None # First grab the objects without a specific backend in _import_structure __magic_name__ = [] while not lines[line_index].startswith("""if TYPE_CHECKING""" ) and find_backend(lines[line_index] ) is None: __magic_name__ = lines[line_index] # If we have everything on a single line, let's deal with it. if _re_one_line_import_struct.search(A_ ): __magic_name__ = _re_one_line_import_struct.search(A_ ).groups()[0] __magic_name__ = re.findall("""\[([^\]]+)\]""", A_ ) for imp in imports: objects.extend([obj[1:-1] for obj in imp.split(""", """ )] ) line_index += 1 continue __magic_name__ = _re_import_struct_key_value.search(A_ ) if single_line_import_search is not None: __magic_name__ = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(""", """ ) if len(A_ ) > 0] objects.extend(A_ ) elif line.startswith(""" """ * 8 + """\"""" ): objects.append(line[9:-3] ) line_index += 1 __magic_name__ = {"""none""": objects} # Let's continue with backend-specific objects in _import_structure while not lines[line_index].startswith("""if TYPE_CHECKING""" ): # If the line is an if not is_backend_available, we grab all objects associated. __magic_name__ = find_backend(lines[line_index] ) # Check if the backend declaration is inside a try block: if _re_try.search(lines[line_index - 1] ) is None: __magic_name__ = None if backend is not None: line_index += 1 # Scroll until we hit the else block of try-except-else while _re_else.search(lines[line_index] ) is None: line_index += 1 line_index += 1 __magic_name__ = [] # Until we unindent, add backend objects to the list while len(lines[line_index] ) <= 1 or lines[line_index].startswith(""" """ * 4 ): __magic_name__ = lines[line_index] if _re_import_struct_add_one.search(A_ ) is not None: objects.append(_re_import_struct_add_one.search(A_ ).groups()[0] ) elif _re_import_struct_add_many.search(A_ ) is not None: __magic_name__ = _re_import_struct_add_many.search(A_ ).groups()[0].split(""", """ ) __magic_name__ = [obj[1:-1] for obj in imports if len(A_ ) > 0] objects.extend(A_ ) elif _re_between_brackets.search(A_ ) is not None: __magic_name__ = _re_between_brackets.search(A_ ).groups()[0].split(""", """ ) __magic_name__ = [obj[1:-1] for obj in imports if len(A_ ) > 0] objects.extend(A_ ) elif _re_quote_object.search(A_ ) is not None: objects.append(_re_quote_object.search(A_ ).groups()[0] ) elif line.startswith(""" """ * 8 + """\"""" ): objects.append(line[9:-3] ) elif line.startswith(""" """ * 12 + """\"""" ): objects.append(line[13:-3] ) line_index += 1 __magic_name__ = objects else: line_index += 1 # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend __magic_name__ = [] while ( line_index < len(A_ ) and find_backend(lines[line_index] ) is None and not lines[line_index].startswith("""else""" ) ): __magic_name__ = lines[line_index] __magic_name__ = _re_import.search(A_ ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(""", """ ) ) elif line.startswith(""" """ * 8 ): objects.append(line[8:-2] ) line_index += 1 __magic_name__ = {"""none""": objects} # Let's continue with backend-specific objects while line_index < len(A_ ): # If the line is an if is_backend_available, we grab all objects associated. __magic_name__ = find_backend(lines[line_index] ) # Check if the backend declaration is inside a try block: if _re_try.search(lines[line_index - 1] ) is None: __magic_name__ = None if backend is not None: line_index += 1 # Scroll until we hit the else block of try-except-else while _re_else.search(lines[line_index] ) is None: line_index += 1 line_index += 1 __magic_name__ = [] # Until we unindent, add backend objects to the list while len(lines[line_index] ) <= 1 or lines[line_index].startswith(""" """ * 8 ): __magic_name__ = lines[line_index] __magic_name__ = _re_import.search(A_ ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(""", """ ) ) elif line.startswith(""" """ * 12 ): objects.append(line[12:-2] ) line_index += 1 __magic_name__ = objects else: line_index += 1 return import_dict_objects, type_hint_objects def a__ ( A_, A_ ): '''simple docstring''' def find_duplicates(A_ ): return [k for k, v in collections.Counter(A_ ).items() if v > 1] if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ): return ["Both sides of the init do not have the same backends!"] __magic_name__ = [] for key in import_dict_objects.keys(): __magic_name__ = find_duplicates(import_dict_objects[key] ) if duplicate_imports: errors.append(f'''Duplicate _import_structure definitions for: {duplicate_imports}''' ) __magic_name__ = find_duplicates(type_hint_objects[key] ) if duplicate_type_hints: errors.append(f'''Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}''' ) if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ): __magic_name__ = """base imports""" if key == """none""" else f'''{key} backend''' errors.append(f'''Differences for {name}:''' ) for a in type_hint_objects[key]: if a not in import_dict_objects[key]: errors.append(f''' {a} in TYPE_HINT but not in _import_structure.''' ) for a in import_dict_objects[key]: if a not in type_hint_objects[key]: errors.append(f''' {a} in _import_structure but not in TYPE_HINT.''' ) return errors def a__ ( ): '''simple docstring''' __magic_name__ = [] for root, _, files in os.walk(A_ ): if "__init__.py" in files: __magic_name__ = os.path.join(A_, """__init__.py""" ) __magic_name__ = parse_init(A_ ) if objects is not None: __magic_name__ = analyze_results(*A_ ) if len(A_ ) > 0: __magic_name__ = f'''Problem in {fname}, both halves do not define the same objects.\n{errors[0]}''' failures.append("""\n""".join(A_ ) ) if len(A_ ) > 0: raise ValueError("""\n\n""".join(A_ ) ) def a__ ( ): '''simple docstring''' __magic_name__ = [] for path, directories, files in os.walk(A_ ): for folder in directories: # Ignore private modules if folder.startswith("""_""" ): directories.remove(A_ ) continue # Ignore leftovers from branches (empty folders apart from pycache) if len(list((Path(A_ ) / folder).glob("""*.py""" ) ) ) == 0: continue __magic_name__ = str((Path(A_ ) / folder).relative_to(A_ ) ) __magic_name__ = short_path.replace(os.path.sep, """.""" ) submodules.append(A_ ) for fname in files: if fname == "__init__.py": continue __magic_name__ = str((Path(A_ ) / fname).relative_to(A_ ) ) __magic_name__ = short_path.replace(""".py""", """""" ).replace(os.path.sep, """.""" ) if len(submodule.split(""".""" ) ) == 1: submodules.append(A_ ) return submodules __lowerCAmelCase : Dict = [ 'convert_pytorch_checkpoint_to_tf2', 'modeling_flax_pytorch_utils', ] def a__ ( ): '''simple docstring''' __magic_name__ = importlib.util.spec_from_file_location( """transformers""", os.path.join(A_, """__init__.py""" ), submodule_search_locations=[PATH_TO_TRANSFORMERS], ) __magic_name__ = spec.loader.load_module() __magic_name__ = [ module for module in get_transformers_submodules() if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys() ] if len(A_ ) > 0: __magic_name__ = """\n""".join(f'''- {module}''' for module in module_not_registered ) raise ValueError( """The following submodules are not properly registered in the main init of Transformers:\n""" f'''{list_of_modules}\n''' """Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.""" ) if __name__ == "__main__": check_all_inits() check_submodules()
76
0
import argparse import json import subprocess def a__ ( A_, A_ ): '''simple docstring''' __magic_name__ = [] __magic_name__ = ( f'''curl -H \"Accept: application/vnd.github+json\" -H \"Authorization: Bearer {token}\"''' """ https://api.github.com/repos/huggingface/transformers/actions/runners""" ) __magic_name__ = subprocess.run(A_, shell=A_, stdout=subprocess.PIPE ) __magic_name__ = output.stdout.decode("""utf-8""" ) __magic_name__ = json.loads(A_ ) __magic_name__ = status["""runners"""] for runner in runners: if runner["name"] in target_runners: if runner["status"] == "offline": offline_runners.append(A_ ) # save the result so we can report them on Slack with open("""offline_runners.txt""", """w""" ) as fp: fp.write(json.dumps(A_ ) ) if len(A_ ) > 0: __magic_name__ = """\n""".join([x["""name"""] for x in offline_runners] ) raise ValueError(f'''The following runners are offline:\n{failed}''' ) if __name__ == "__main__": def a__ ( A_ ): '''simple docstring''' return values.split(""",""" ) __lowerCAmelCase : Dict = argparse.ArgumentParser() # Required parameters parser.add_argument( '--target_runners', default=None, type=list_str, required=True, help='Comma-separated list of runners to check status.', ) parser.add_argument( '--token', default=None, type=str, required=True, help='A token that has actions:read permission.' ) __lowerCAmelCase : Optional[Any] = parser.parse_args() get_runner_status(args.target_runners, args.token)
709
import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging __lowerCAmelCase : Optional[Any] = logging.get_logger(__name__) __lowerCAmelCase : List[Any] = { 'asapp/sew-d-tiny-100k': 'https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json', # See all SEW-D models at https://huggingface.co/models?filter=sew-d } class UpperCAmelCase_ ( _A ): '''simple docstring''' a__ = """sew-d""" def __init__( self : List[str] , UpperCamelCase__ : Tuple=32 , UpperCamelCase__ : Optional[int]=768 , UpperCamelCase__ : Tuple=12 , UpperCamelCase__ : Optional[Any]=12 , UpperCamelCase__ : int=3072 , UpperCamelCase__ : Tuple=2 , UpperCamelCase__ : List[Any]=512 , UpperCamelCase__ : Any=256 , UpperCamelCase__ : Optional[int]=True , UpperCamelCase__ : Dict=True , UpperCamelCase__ : str=("p2c", "c2p") , UpperCamelCase__ : List[Any]="layer_norm" , UpperCamelCase__ : int="gelu_python" , UpperCamelCase__ : Optional[int]=0.1 , UpperCamelCase__ : Tuple=0.1 , UpperCamelCase__ : List[Any]=0.1 , UpperCamelCase__ : int=0.0 , UpperCamelCase__ : Dict=0.1 , UpperCamelCase__ : List[Any]=0.02 , UpperCamelCase__ : Optional[int]=1E-7 , UpperCamelCase__ : List[Any]=1E-5 , UpperCamelCase__ : List[str]="group" , UpperCamelCase__ : Optional[int]="gelu" , UpperCamelCase__ : Tuple=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512) , UpperCamelCase__ : str=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , UpperCamelCase__ : Optional[Any]=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , UpperCamelCase__ : Optional[Any]=False , UpperCamelCase__ : Optional[int]=128 , UpperCamelCase__ : Tuple=16 , UpperCamelCase__ : Optional[int]=True , UpperCamelCase__ : Dict=0.05 , UpperCamelCase__ : str=10 , UpperCamelCase__ : Tuple=2 , UpperCamelCase__ : Dict=0.0 , UpperCamelCase__ : Dict=10 , UpperCamelCase__ : Union[str, Any]=0 , UpperCamelCase__ : List[Any]="mean" , UpperCamelCase__ : int=False , UpperCamelCase__ : Optional[int]=False , UpperCamelCase__ : Optional[int]=256 , UpperCamelCase__ : List[str]=0 , UpperCamelCase__ : Union[str, Any]=1 , UpperCamelCase__ : List[Any]=2 , **UpperCamelCase__ : str , ) -> Dict: """simple docstring""" super().__init__(**UpperCamelCase__ , pad_token_id=UpperCamelCase__ , bos_token_id=UpperCamelCase__ , eos_token_id=UpperCamelCase__ ) __magic_name__ = hidden_size __magic_name__ = feat_extract_norm __magic_name__ = feat_extract_activation __magic_name__ = list(UpperCamelCase__ ) __magic_name__ = list(UpperCamelCase__ ) __magic_name__ = list(UpperCamelCase__ ) __magic_name__ = conv_bias __magic_name__ = num_conv_pos_embeddings __magic_name__ = num_conv_pos_embedding_groups __magic_name__ = len(self.conv_dim ) __magic_name__ = num_hidden_layers __magic_name__ = intermediate_size __magic_name__ = squeeze_factor __magic_name__ = max_position_embeddings __magic_name__ = position_buckets __magic_name__ = share_att_key __magic_name__ = relative_attention __magic_name__ = norm_rel_ebd __magic_name__ = list(UpperCamelCase__ ) __magic_name__ = hidden_act __magic_name__ = num_attention_heads __magic_name__ = hidden_dropout __magic_name__ = attention_dropout __magic_name__ = activation_dropout __magic_name__ = feat_proj_dropout __magic_name__ = final_dropout __magic_name__ = layer_norm_eps __magic_name__ = feature_layer_norm_eps __magic_name__ = initializer_range __magic_name__ = vocab_size if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( """Configuration for convolutional layers is incorrect.""" """It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,""" F'''but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)''' F'''= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' ) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 __magic_name__ = apply_spec_augment __magic_name__ = mask_time_prob __magic_name__ = mask_time_length __magic_name__ = mask_time_min_masks __magic_name__ = mask_feature_prob __magic_name__ = mask_feature_length __magic_name__ = mask_feature_min_masks # ctc loss __magic_name__ = ctc_loss_reduction __magic_name__ = ctc_zero_infinity # sequence classification __magic_name__ = use_weighted_layer_sum __magic_name__ = classifier_proj_size @property def _lowercase ( self : Union[str, Any] ) -> str: """simple docstring""" return functools.reduce(operator.mul , self.conv_stride , 1 )
76
0
import contextlib import copy import random from typing import Any, Dict, Iterable, Optional, Union import numpy as np import torch from .utils import deprecate, is_transformers_available if is_transformers_available(): import transformers def a__ ( A_ ): '''simple docstring''' random.seed(A_ ) np.random.seed(A_ ) torch.manual_seed(A_ ) torch.cuda.manual_seed_all(A_ ) # ^^ safe to call this function even if cuda is not available class UpperCAmelCase_ : '''simple docstring''' def __init__( self : List[str] , UpperCamelCase__ : Iterable[torch.nn.Parameter] , UpperCamelCase__ : float = 0.9999 , UpperCamelCase__ : float = 0.0 , UpperCamelCase__ : int = 0 , UpperCamelCase__ : bool = False , UpperCamelCase__ : Union[float, int] = 1.0 , UpperCamelCase__ : Union[float, int] = 2 / 3 , UpperCamelCase__ : Optional[Any] = None , UpperCamelCase__ : Dict[str, Any] = None , **UpperCamelCase__ : List[str] , ) -> str: """simple docstring""" if isinstance(UpperCamelCase__ , torch.nn.Module ): __magic_name__ = ( """Passing a `torch.nn.Module` to `ExponentialMovingAverage` is deprecated. """ """Please pass the parameters of the module instead.""" ) deprecate( """passing a `torch.nn.Module` to `ExponentialMovingAverage`""" , """1.0.0""" , UpperCamelCase__ , standard_warn=UpperCamelCase__ , ) __magic_name__ = parameters.parameters() # set use_ema_warmup to True if a torch.nn.Module is passed for backwards compatibility __magic_name__ = True if kwargs.get("""max_value""" , UpperCamelCase__ ) is not None: __magic_name__ = """The `max_value` argument is deprecated. Please use `decay` instead.""" deprecate("""max_value""" , """1.0.0""" , UpperCamelCase__ , standard_warn=UpperCamelCase__ ) __magic_name__ = kwargs["""max_value"""] if kwargs.get("""min_value""" , UpperCamelCase__ ) is not None: __magic_name__ = """The `min_value` argument is deprecated. Please use `min_decay` instead.""" deprecate("""min_value""" , """1.0.0""" , UpperCamelCase__ , standard_warn=UpperCamelCase__ ) __magic_name__ = kwargs["""min_value"""] __magic_name__ = list(UpperCamelCase__ ) __magic_name__ = [p.clone().detach() for p in parameters] if kwargs.get("""device""" , UpperCamelCase__ ) is not None: __magic_name__ = """The `device` argument is deprecated. Please use `to` instead.""" deprecate("""device""" , """1.0.0""" , UpperCamelCase__ , standard_warn=UpperCamelCase__ ) self.to(device=kwargs["""device"""] ) __magic_name__ = None __magic_name__ = decay __magic_name__ = min_decay __magic_name__ = update_after_step __magic_name__ = use_ema_warmup __magic_name__ = inv_gamma __magic_name__ = power __magic_name__ = 0 __magic_name__ = None # set in `step()` __magic_name__ = model_cls __magic_name__ = model_config @classmethod def _lowercase ( cls : List[Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Optional[Any] ) -> List[str]: """simple docstring""" __magic_name__ , __magic_name__ = model_cls.load_config(UpperCamelCase__ , return_unused_kwargs=UpperCamelCase__ ) __magic_name__ = model_cls.from_pretrained(UpperCamelCase__ ) __magic_name__ = cls(model.parameters() , model_cls=UpperCamelCase__ , model_config=model.config ) ema_model.load_state_dict(UpperCamelCase__ ) return ema_model def _lowercase ( self : List[Any] , UpperCamelCase__ : Dict ) -> Union[str, Any]: """simple docstring""" if self.model_cls is None: raise ValueError("""`save_pretrained` can only be used if `model_cls` was defined at __init__.""" ) if self.model_config is None: raise ValueError("""`save_pretrained` can only be used if `model_config` was defined at __init__.""" ) __magic_name__ = self.model_cls.from_config(self.model_config ) __magic_name__ = self.state_dict() state_dict.pop("""shadow_params""" , UpperCamelCase__ ) model.register_to_config(**UpperCamelCase__ ) self.copy_to(model.parameters() ) model.save_pretrained(UpperCamelCase__ ) def _lowercase ( self : Optional[Any] , UpperCamelCase__ : int ) -> Dict: """simple docstring""" __magic_name__ = max(0 , optimization_step - self.update_after_step - 1 ) if step <= 0: return 0.0 if self.use_ema_warmup: __magic_name__ = 1 - (1 + step / self.inv_gamma) ** -self.power else: __magic_name__ = (1 + step) / (10 + step) __magic_name__ = min(UpperCamelCase__ , self.decay ) # make sure decay is not smaller than min_decay __magic_name__ = max(UpperCamelCase__ , self.min_decay ) return cur_decay_value @torch.no_grad() def _lowercase ( self : str , UpperCamelCase__ : Iterable[torch.nn.Parameter] ) -> List[str]: """simple docstring""" if isinstance(UpperCamelCase__ , torch.nn.Module ): __magic_name__ = ( """Passing a `torch.nn.Module` to `ExponentialMovingAverage.step` is deprecated. """ """Please pass the parameters of the module instead.""" ) deprecate( """passing a `torch.nn.Module` to `ExponentialMovingAverage.step`""" , """1.0.0""" , UpperCamelCase__ , standard_warn=UpperCamelCase__ , ) __magic_name__ = parameters.parameters() __magic_name__ = list(UpperCamelCase__ ) self.optimization_step += 1 # Compute the decay factor for the exponential moving average. __magic_name__ = self.get_decay(self.optimization_step ) __magic_name__ = decay __magic_name__ = 1 - decay __magic_name__ = contextlib.nullcontext if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled(): import deepspeed for s_param, param in zip(self.shadow_params , UpperCamelCase__ ): if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled(): __magic_name__ = deepspeed.zero.GatheredParameters(UpperCamelCase__ , modifier_rank=UpperCamelCase__ ) with context_manager(): if param.requires_grad: s_param.sub_(one_minus_decay * (s_param - param) ) else: s_param.copy_(UpperCamelCase__ ) def _lowercase ( self : Dict , UpperCamelCase__ : Iterable[torch.nn.Parameter] ) -> Optional[Any]: """simple docstring""" __magic_name__ = list(UpperCamelCase__ ) for s_param, param in zip(self.shadow_params , UpperCamelCase__ ): param.data.copy_(s_param.to(param.device ).data ) def _lowercase ( self : List[str] , UpperCamelCase__ : Tuple=None , UpperCamelCase__ : Optional[int]=None ) -> Optional[Any]: """simple docstring""" __magic_name__ = [ p.to(device=UpperCamelCase__ , dtype=UpperCamelCase__ ) if p.is_floating_point() else p.to(device=UpperCamelCase__ ) for p in self.shadow_params ] def _lowercase ( self : List[Any] ) -> List[Any]: """simple docstring""" return { "decay": self.decay, "min_decay": self.min_decay, "optimization_step": self.optimization_step, "update_after_step": self.update_after_step, "use_ema_warmup": self.use_ema_warmup, "inv_gamma": self.inv_gamma, "power": self.power, "shadow_params": self.shadow_params, } def _lowercase ( self : str , UpperCamelCase__ : Iterable[torch.nn.Parameter] ) -> Any: """simple docstring""" __magic_name__ = [param.detach().cpu().clone() for param in parameters] def _lowercase ( self : str , UpperCamelCase__ : Iterable[torch.nn.Parameter] ) -> str: """simple docstring""" if self.temp_stored_params is None: raise RuntimeError("""This ExponentialMovingAverage has no `store()`ed weights """ """to `restore()`""" ) for c_param, param in zip(self.temp_stored_params , UpperCamelCase__ ): param.data.copy_(c_param.data ) # Better memory-wise. __magic_name__ = None def _lowercase ( self : List[Any] , UpperCamelCase__ : dict ) -> Optional[Any]: """simple docstring""" __magic_name__ = copy.deepcopy(UpperCamelCase__ ) __magic_name__ = state_dict.get("""decay""" , self.decay ) if self.decay < 0.0 or self.decay > 1.0: raise ValueError("""Decay must be between 0 and 1""" ) __magic_name__ = state_dict.get("""min_decay""" , self.min_decay ) if not isinstance(self.min_decay , UpperCamelCase__ ): raise ValueError("""Invalid min_decay""" ) __magic_name__ = state_dict.get("""optimization_step""" , self.optimization_step ) if not isinstance(self.optimization_step , UpperCamelCase__ ): raise ValueError("""Invalid optimization_step""" ) __magic_name__ = state_dict.get("""update_after_step""" , self.update_after_step ) if not isinstance(self.update_after_step , UpperCamelCase__ ): raise ValueError("""Invalid update_after_step""" ) __magic_name__ = state_dict.get("""use_ema_warmup""" , self.use_ema_warmup ) if not isinstance(self.use_ema_warmup , UpperCamelCase__ ): raise ValueError("""Invalid use_ema_warmup""" ) __magic_name__ = state_dict.get("""inv_gamma""" , self.inv_gamma ) if not isinstance(self.inv_gamma , (float, int) ): raise ValueError("""Invalid inv_gamma""" ) __magic_name__ = state_dict.get("""power""" , self.power ) if not isinstance(self.power , (float, int) ): raise ValueError("""Invalid power""" ) __magic_name__ = state_dict.get("""shadow_params""" , UpperCamelCase__ ) if shadow_params is not None: __magic_name__ = shadow_params if not isinstance(self.shadow_params , UpperCamelCase__ ): raise ValueError("""shadow_params must be a list""" ) if not all(isinstance(UpperCamelCase__ , torch.Tensor ) for p in self.shadow_params ): raise ValueError("""shadow_params must all be Tensors""" )
710
import math import random def a__ ( A_, A_ = False ): '''simple docstring''' if deriv: return value * (1 - value) return 1 / (1 + math.exp(-value )) # Initial Value __lowerCAmelCase : Union[str, Any] = 0.02 def a__ ( A_, A_ ): '''simple docstring''' __magic_name__ = float(2 * (random.randint(1, 100 )) - 1 ) for _ in range(A_ ): # Forward propagation __magic_name__ = sigmoid_function(INITIAL_VALUE * weight ) # How much did we miss? __magic_name__ = (expected / 100) - layer_a # Error delta __magic_name__ = layer_1_error * sigmoid_function(A_, A_ ) # Update weight weight += INITIAL_VALUE * layer_1_delta return layer_a * 100 if __name__ == "__main__": import doctest doctest.testmod() __lowerCAmelCase : List[Any] = int(input('Expected value: ')) __lowerCAmelCase : Tuple = int(input('Number of propagations: ')) print(forward_propagation(expected, number_propagations))
76
0
import itertools import random import unittest import numpy as np from transformers import BatchFeature, SpeechTaFeatureExtractor from transformers.testing_utils import require_torch from transformers.utils.import_utils import is_torch_available from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin if is_torch_available(): import torch __lowerCAmelCase : int = random.Random() def a__ ( A_, A_=1.0, A_=None, A_=None ): '''simple docstring''' if rng is None: __magic_name__ = global_rng __magic_name__ = [] for batch_idx in range(shape[0] ): values.append([] ) for _ in range(shape[1] ): values[-1].append(rng.random() * scale ) return values @require_torch class UpperCAmelCase_ ( unittest.TestCase ): '''simple docstring''' def __init__( self : Dict , UpperCamelCase__ : List[Any] , UpperCamelCase__ : List[str]=7 , UpperCamelCase__ : Any=400 , UpperCamelCase__ : Tuple=2000 , UpperCamelCase__ : List[str]=1 , UpperCamelCase__ : List[Any]=0.0 , UpperCamelCase__ : Tuple=1_6000 , UpperCamelCase__ : Union[str, Any]=True , UpperCamelCase__ : str=80 , UpperCamelCase__ : List[str]=16 , UpperCamelCase__ : List[str]=64 , UpperCamelCase__ : Optional[Any]="hann_window" , UpperCamelCase__ : Tuple=80 , UpperCamelCase__ : str=7600 , UpperCamelCase__ : Any=1E-10 , UpperCamelCase__ : int=True , ) -> List[Any]: """simple docstring""" __magic_name__ = parent __magic_name__ = batch_size __magic_name__ = min_seq_length __magic_name__ = max_seq_length __magic_name__ = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1) __magic_name__ = feature_size __magic_name__ = padding_value __magic_name__ = sampling_rate __magic_name__ = do_normalize __magic_name__ = num_mel_bins __magic_name__ = hop_length __magic_name__ = win_length __magic_name__ = win_function __magic_name__ = fmin __magic_name__ = fmax __magic_name__ = mel_floor __magic_name__ = return_attention_mask def _lowercase ( self : Union[str, Any] ) -> Optional[Any]: """simple docstring""" return { "feature_size": self.feature_size, "padding_value": self.padding_value, "sampling_rate": self.sampling_rate, "do_normalize": self.do_normalize, "num_mel_bins": self.num_mel_bins, "hop_length": self.hop_length, "win_length": self.win_length, "win_function": self.win_function, "fmin": self.fmin, "fmax": self.fmax, "mel_floor": self.mel_floor, "return_attention_mask": self.return_attention_mask, } def _lowercase ( self : Dict , UpperCamelCase__ : List[Any]=False , UpperCamelCase__ : Union[str, Any]=False ) -> Optional[int]: """simple docstring""" def _flatten(UpperCamelCase__ : int ): return list(itertools.chain(*lowerCAmelCase_ ) ) if equal_length: __magic_name__ = floats_list((self.batch_size, self.max_seq_length) ) else: # make sure that inputs increase in size __magic_name__ = [ _flatten(floats_list((x, self.feature_size) ) ) for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff ) ] if numpify: __magic_name__ = [np.asarray(lowerCAmelCase_ ) for x in speech_inputs] return speech_inputs def _lowercase ( self : Dict , UpperCamelCase__ : str=False , UpperCamelCase__ : str=False ) -> List[Any]: """simple docstring""" if equal_length: __magic_name__ = [floats_list((self.max_seq_length, self.num_mel_bins) ) for _ in range(self.batch_size )] else: # make sure that inputs increase in size __magic_name__ = [ floats_list((x, self.num_mel_bins) ) for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff ) ] if numpify: __magic_name__ = [np.asarray(lowerCAmelCase_ ) for x in speech_inputs] return speech_inputs @require_torch class UpperCAmelCase_ ( __UpperCAmelCase , unittest.TestCase ): '''simple docstring''' a__ = SpeechTaFeatureExtractor def _lowercase ( self : Any ) -> int: """simple docstring""" __magic_name__ = SpeechTaFeatureExtractionTester(self ) def _lowercase ( self : List[Any] , UpperCamelCase__ : Optional[int] ) -> int: """simple docstring""" self.assertTrue(np.all(np.mean(lowerCAmelCase_ , axis=0 ) < 1E-3 ) ) self.assertTrue(np.all(np.abs(np.var(lowerCAmelCase_ , axis=0 ) - 1 ) < 1E-3 ) ) def _lowercase ( self : int ) -> Optional[Any]: """simple docstring""" __magic_name__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) # create three inputs of length 800, 1000, and 1200 __magic_name__ = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )] __magic_name__ = [np.asarray(lowerCAmelCase_ ) for speech_input in speech_inputs] # Test not batched input __magic_name__ = feat_extract(speech_inputs[0] , return_tensors="""np""" ).input_values __magic_name__ = feat_extract(np_speech_inputs[0] , return_tensors="""np""" ).input_values self.assertTrue(np.allclose(lowerCAmelCase_ , lowerCAmelCase_ , atol=1E-3 ) ) # Test batched __magic_name__ = feat_extract(lowerCAmelCase_ , return_tensors="""np""" ).input_values __magic_name__ = feat_extract(lowerCAmelCase_ , return_tensors="""np""" ).input_values for enc_seq_a, enc_seq_a in zip(lowerCAmelCase_ , lowerCAmelCase_ ): self.assertTrue(np.allclose(lowerCAmelCase_ , lowerCAmelCase_ , atol=1E-3 ) ) def _lowercase ( self : Union[str, Any] ) -> Any: """simple docstring""" __magic_name__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) __magic_name__ = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )] __magic_name__ = ["""longest""", """max_length""", """do_not_pad"""] __magic_name__ = [None, 1600, None] for max_length, padding in zip(lowerCAmelCase_ , lowerCAmelCase_ ): __magic_name__ = feat_extract(lowerCAmelCase_ , padding=lowerCAmelCase_ , max_length=lowerCAmelCase_ , return_tensors="""np""" ) __magic_name__ = processed.input_values self._check_zero_mean_unit_variance(input_values[0][:800] ) self.assertTrue(input_values[0][800:].sum() < 1E-6 ) self._check_zero_mean_unit_variance(input_values[1][:1000] ) self.assertTrue(input_values[0][1000:].sum() < 1E-6 ) self._check_zero_mean_unit_variance(input_values[2][:1200] ) def _lowercase ( self : List[str] ) -> Any: """simple docstring""" __magic_name__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) __magic_name__ = range(800 , 1400 , 200 ) __magic_name__ = [floats_list((1, x) )[0] for x in lengths] __magic_name__ = ["""longest""", """max_length""", """do_not_pad"""] __magic_name__ = [None, 1600, None] for max_length, padding in zip(lowerCAmelCase_ , lowerCAmelCase_ ): __magic_name__ = feat_extract(lowerCAmelCase_ , max_length=lowerCAmelCase_ , padding=lowerCAmelCase_ ) __magic_name__ = processed.input_values self._check_zero_mean_unit_variance(input_values[0][:800] ) self._check_zero_mean_unit_variance(input_values[1][:1000] ) self._check_zero_mean_unit_variance(input_values[2][:1200] ) def _lowercase ( self : str ) -> List[Any]: """simple docstring""" __magic_name__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) __magic_name__ = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )] __magic_name__ = feat_extract( lowerCAmelCase_ , truncation=lowerCAmelCase_ , max_length=1000 , padding="""max_length""" , return_tensors="""np""" ) __magic_name__ = processed.input_values self._check_zero_mean_unit_variance(input_values[0, :800] ) self._check_zero_mean_unit_variance(input_values[1] ) self._check_zero_mean_unit_variance(input_values[2] ) def _lowercase ( self : List[Any] ) -> List[Any]: """simple docstring""" __magic_name__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) __magic_name__ = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )] __magic_name__ = feat_extract( lowerCAmelCase_ , truncation=lowerCAmelCase_ , max_length=1000 , padding="""longest""" , return_tensors="""np""" ) __magic_name__ = processed.input_values self._check_zero_mean_unit_variance(input_values[0, :800] ) self._check_zero_mean_unit_variance(input_values[1, :1000] ) self._check_zero_mean_unit_variance(input_values[2] ) # make sure that if max_length < longest -> then pad to max_length self.assertTrue(input_values.shape == (3, 1000) ) __magic_name__ = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )] __magic_name__ = feat_extract( lowerCAmelCase_ , truncation=lowerCAmelCase_ , max_length=2000 , padding="""longest""" , return_tensors="""np""" ) __magic_name__ = processed.input_values self._check_zero_mean_unit_variance(input_values[0, :800] ) self._check_zero_mean_unit_variance(input_values[1, :1000] ) self._check_zero_mean_unit_variance(input_values[2] ) # make sure that if max_length > longest -> then pad to longest self.assertTrue(input_values.shape == (3, 1200) ) def _lowercase ( self : Dict ) -> Any: """simple docstring""" __magic_name__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) __magic_name__ = np.random.rand(100 ).astype(np.floataa ) __magic_name__ = np_speech_inputs.tolist() for inputs in [py_speech_inputs, np_speech_inputs]: __magic_name__ = feature_extractor.pad([{"""input_values""": inputs}] , return_tensors="""np""" ) self.assertTrue(np_processed.input_values.dtype == np.floataa ) __magic_name__ = feature_extractor.pad([{"""input_values""": inputs}] , return_tensors="""pt""" ) self.assertTrue(pt_processed.input_values.dtype == torch.floataa ) def _lowercase ( self : Union[str, Any] ) -> Optional[int]: """simple docstring""" __magic_name__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) # create three inputs of length 800, 1000, and 1200 __magic_name__ = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )] __magic_name__ = [np.asarray(lowerCAmelCase_ ) for speech_input in speech_inputs] # Test feature size __magic_name__ = feature_extractor(audio_target=lowerCAmelCase_ , padding=lowerCAmelCase_ , return_tensors="""np""" ).input_values self.assertTrue(input_values.ndim == 3 ) self.assertTrue(input_values.shape[-1] == feature_extractor.num_mel_bins ) # Test not batched input __magic_name__ = feature_extractor(speech_inputs[0] , return_tensors="""np""" ).input_values __magic_name__ = feature_extractor(np_speech_inputs[0] , return_tensors="""np""" ).input_values self.assertTrue(np.allclose(lowerCAmelCase_ , lowerCAmelCase_ , atol=1E-3 ) ) # Test batched __magic_name__ = feature_extractor(lowerCAmelCase_ , return_tensors="""np""" ).input_values __magic_name__ = feature_extractor(lowerCAmelCase_ , return_tensors="""np""" ).input_values for enc_seq_a, enc_seq_a in zip(lowerCAmelCase_ , lowerCAmelCase_ ): self.assertTrue(np.allclose(lowerCAmelCase_ , lowerCAmelCase_ , atol=1E-3 ) ) # Test 2-D numpy arrays are batched. __magic_name__ = [floats_list((1, x) )[0] for x in (800, 800, 800)] __magic_name__ = np.asarray(lowerCAmelCase_ ) __magic_name__ = feature_extractor(lowerCAmelCase_ , return_tensors="""np""" ).input_values __magic_name__ = feature_extractor(lowerCAmelCase_ , return_tensors="""np""" ).input_values for enc_seq_a, enc_seq_a in zip(lowerCAmelCase_ , lowerCAmelCase_ ): self.assertTrue(np.allclose(lowerCAmelCase_ , lowerCAmelCase_ , atol=1E-3 ) ) def _lowercase ( self : List[Any] ) -> List[str]: """simple docstring""" __magic_name__ = self.feat_extract_tester.prepare_inputs_for_target() __magic_name__ = self.feature_extraction_class(**self.feat_extract_dict ) __magic_name__ = feat_extract.model_input_names[0] __magic_name__ = BatchFeature({input_name: speech_inputs} ) self.assertTrue(all(len(lowerCAmelCase_ ) == len(lowerCAmelCase_ ) for x, y in zip(lowerCAmelCase_ , processed_features[input_name] ) ) ) __magic_name__ = self.feat_extract_tester.prepare_inputs_for_target(equal_length=lowerCAmelCase_ ) __magic_name__ = BatchFeature({input_name: speech_inputs} , tensor_type="""np""" ) __magic_name__ = processed_features[input_name] if len(batch_features_input.shape ) < 3: __magic_name__ = batch_features_input[:, :, None] self.assertTrue( batch_features_input.shape == (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) ) @require_torch def _lowercase ( self : int ) -> Optional[Any]: """simple docstring""" __magic_name__ = self.feat_extract_tester.prepare_inputs_for_target(equal_length=lowerCAmelCase_ ) __magic_name__ = self.feature_extraction_class(**self.feat_extract_dict ) __magic_name__ = feat_extract.model_input_names[0] __magic_name__ = BatchFeature({input_name: speech_inputs} , tensor_type="""pt""" ) __magic_name__ = processed_features[input_name] if len(batch_features_input.shape ) < 3: __magic_name__ = batch_features_input[:, :, None] self.assertTrue( batch_features_input.shape == (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) ) @require_torch def _lowercase ( self : List[Any] ) -> str: """simple docstring""" __magic_name__ = self.feature_extraction_class(**self.feat_extract_dict ) __magic_name__ = self.feat_extract_tester.prepare_inputs_for_target() __magic_name__ = feat_extract.model_input_names[0] __magic_name__ = BatchFeature({input_name: speech_inputs} ) __magic_name__ = feat_extract.num_mel_bins # hack! __magic_name__ = feat_extract.pad(lowerCAmelCase_ , padding="""longest""" , return_tensors="""np""" )[input_name] __magic_name__ = feat_extract.pad(lowerCAmelCase_ , padding="""longest""" , return_tensors="""pt""" )[input_name] self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_pt.numpy().astype(np.floataa ).sum() ) < 1E-2 ) def _lowercase ( self : int ) -> List[Any]: """simple docstring""" __magic_name__ = self.feat_extract_dict __magic_name__ = True __magic_name__ = self.feature_extraction_class(**lowerCAmelCase_ ) __magic_name__ = self.feat_extract_tester.prepare_inputs_for_target() __magic_name__ = [len(lowerCAmelCase_ ) for x in speech_inputs] __magic_name__ = feat_extract.model_input_names[0] __magic_name__ = BatchFeature({input_name: speech_inputs} ) __magic_name__ = feat_extract.num_mel_bins # hack! __magic_name__ = feat_extract.pad(lowerCAmelCase_ , padding="""longest""" , return_tensors="""np""" ) self.assertIn("""attention_mask""" , lowerCAmelCase_ ) self.assertListEqual(list(processed.attention_mask.shape ) , list(processed[input_name].shape[:2] ) ) self.assertListEqual(processed.attention_mask.sum(-1 ).tolist() , lowerCAmelCase_ ) def _lowercase ( self : str ) -> List[str]: """simple docstring""" __magic_name__ = self.feat_extract_dict __magic_name__ = True __magic_name__ = self.feature_extraction_class(**lowerCAmelCase_ ) __magic_name__ = self.feat_extract_tester.prepare_inputs_for_target() __magic_name__ = [len(lowerCAmelCase_ ) for x in speech_inputs] __magic_name__ = feat_extract.model_input_names[0] __magic_name__ = BatchFeature({input_name: speech_inputs} ) __magic_name__ = min(lowerCAmelCase_ ) __magic_name__ = feat_extract.num_mel_bins # hack! __magic_name__ = feat_extract.pad( lowerCAmelCase_ , padding="""max_length""" , max_length=lowerCAmelCase_ , truncation=lowerCAmelCase_ , return_tensors="""np""" ) self.assertIn("""attention_mask""" , lowerCAmelCase_ ) self.assertListEqual( list(processed_pad.attention_mask.shape ) , [processed_pad[input_name].shape[0], max_length] ) self.assertListEqual( processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist() , [max_length for x in speech_inputs] ) def _lowercase ( self : int , UpperCamelCase__ : List[str] ) -> List[str]: """simple docstring""" from datasets import load_dataset __magic_name__ = load_dataset("""hf-internal-testing/librispeech_asr_dummy""" , """clean""" , split="""validation""" ) # automatic decoding with librispeech __magic_name__ = ds.sort("""id""" ).select(range(lowerCAmelCase_ ) )[:num_samples]["""audio"""] return [x["array"] for x in speech_samples] def _lowercase ( self : Any ) -> Optional[Any]: """simple docstring""" __magic_name__ = torch.tensor( [2.3804E-03, 2.0752E-03, 1.9836E-03, 2.1057E-03, 1.6174E-03, 3.0518E-04, 9.1553E-05, 3.3569E-04, 9.7656E-04, 1.8311E-03, 2.0142E-03, 2.1057E-03, 1.7395E-03, 4.5776E-04, -3.9673E-04, 4.5776E-04, 1.0071E-03, 9.1553E-05, 4.8828E-04, 1.1597E-03, 7.3242E-04, 9.4604E-04, 1.8005E-03, 1.8311E-03, 8.8501E-04, 4.2725E-04, 4.8828E-04, 7.3242E-04, 1.0986E-03, 2.1057E-03] ) # fmt: on __magic_name__ = self._load_datasamples(1 ) __magic_name__ = SpeechTaFeatureExtractor() __magic_name__ = feature_extractor(lowerCAmelCase_ , return_tensors="""pt""" ).input_values self.assertEquals(input_values.shape , (1, 9_3680) ) self.assertTrue(torch.allclose(input_values[0, :30] , lowerCAmelCase_ , atol=1E-6 ) ) def _lowercase ( self : List[Any] ) -> str: """simple docstring""" __magic_name__ = torch.tensor( [-2.6870, -3.0104, -3.1356, -3.5352, -3.0044, -3.0353, -3.4719, -3.6777, -3.1520, -2.9435, -2.6553, -2.8795, -2.9944, -2.5921, -3.0279, -3.0386, -3.0864, -3.1291, -3.2353, -2.7444, -2.6831, -2.7287, -3.1761, -3.1571, -3.2726, -3.0582, -3.1007, -3.4533, -3.4695, -3.0998] ) # fmt: on __magic_name__ = self._load_datasamples(1 ) __magic_name__ = SpeechTaFeatureExtractor() __magic_name__ = feature_extractor(audio_target=lowerCAmelCase_ , return_tensors="""pt""" ).input_values self.assertEquals(input_values.shape , (1, 366, 80) ) self.assertTrue(torch.allclose(input_values[0, 0, :30] , lowerCAmelCase_ , atol=1E-4 ) )
711
import os import sys __lowerCAmelCase : Optional[Any] = os.path.join(os.path.dirname(__file__), 'src') sys.path.append(SRC_DIR) from transformers import ( AutoConfig, AutoModel, AutoModelForCausalLM, AutoModelForMaskedLM, AutoModelForQuestionAnswering, AutoModelForSequenceClassification, AutoTokenizer, add_start_docstrings, ) __lowerCAmelCase : Union[str, Any] = [ 'torch', 'numpy', 'tokenizers', 'filelock', 'requests', 'tqdm', 'regex', 'sentencepiece', 'sacremoses', 'importlib_metadata', 'huggingface_hub', ] @add_start_docstrings(AutoConfig.__doc__ ) def a__ ( *A_, **A_ ): '''simple docstring''' return AutoConfig.from_pretrained(*A_, **A_ ) @add_start_docstrings(AutoTokenizer.__doc__ ) def a__ ( *A_, **A_ ): '''simple docstring''' return AutoTokenizer.from_pretrained(*A_, **A_ ) @add_start_docstrings(AutoModel.__doc__ ) def a__ ( *A_, **A_ ): '''simple docstring''' return AutoModel.from_pretrained(*A_, **A_ ) @add_start_docstrings(AutoModelForCausalLM.__doc__ ) def a__ ( *A_, **A_ ): '''simple docstring''' return AutoModelForCausalLM.from_pretrained(*A_, **A_ ) @add_start_docstrings(AutoModelForMaskedLM.__doc__ ) def a__ ( *A_, **A_ ): '''simple docstring''' return AutoModelForMaskedLM.from_pretrained(*A_, **A_ ) @add_start_docstrings(AutoModelForSequenceClassification.__doc__ ) def a__ ( *A_, **A_ ): '''simple docstring''' return AutoModelForSequenceClassification.from_pretrained(*A_, **A_ ) @add_start_docstrings(AutoModelForQuestionAnswering.__doc__ ) def a__ ( *A_, **A_ ): '''simple docstring''' return AutoModelForQuestionAnswering.from_pretrained(*A_, **A_ )
76
0
'''simple docstring''' import torch from ..models.speechta import SpeechTaForTextToSpeech, SpeechTaHifiGan, SpeechTaProcessor from ..utils import is_datasets_available from .base import PipelineTool if is_datasets_available(): from datasets import load_dataset class UpperCAmelCase_ ( _UpperCamelCase ): '''simple docstring''' a__ = """microsoft/speecht5_tts""" a__ = ( """This is a tool that reads an English text out loud. It takes an input named `text` which should contain the """ """text to read (in English) and returns a waveform object containing the sound.""" ) a__ = """text_reader""" a__ = SpeechTaProcessor a__ = SpeechTaForTextToSpeech a__ = SpeechTaHifiGan a__ = ["""text"""] a__ = ["""audio"""] def _lowercase ( self : Union[str, Any] ) -> str: """simple docstring""" if self.post_processor is None: __magic_name__ = '''microsoft/speecht5_hifigan''' super().setup() def _lowercase ( self : Dict , UpperCamelCase__ : Any , UpperCamelCase__ : str=None ) -> Any: """simple docstring""" __magic_name__ = self.pre_processor(text=_UpperCAmelCase , return_tensors="""pt""" , truncation=_UpperCAmelCase ) if speaker_embeddings is None: if not is_datasets_available(): raise ImportError("""Datasets needs to be installed if not passing speaker embeddings.""" ) __magic_name__ = load_dataset("""Matthijs/cmu-arctic-xvectors""" , split="""validation""" ) __magic_name__ = torch.tensor(embeddings_dataset[7305]["""xvector"""] ).unsqueeze(0 ) return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings} def _lowercase ( self : List[Any] , UpperCamelCase__ : Optional[int] ) -> Any: """simple docstring""" with torch.no_grad(): return self.model.generate_speech(**_UpperCAmelCase ) def _lowercase ( self : int , UpperCamelCase__ : List[str] ) -> Optional[Any]: """simple docstring""" with torch.no_grad(): return self.post_processor(_UpperCAmelCase ).cpu().detach()
712
from typing import Dict from .base import GenericTensor, Pipeline class UpperCAmelCase_ ( _A ): '''simple docstring''' def _lowercase ( self : List[Any] , UpperCamelCase__ : List[Any]=None , UpperCamelCase__ : Union[str, Any]=None , UpperCamelCase__ : Any=None , **UpperCamelCase__ : Dict ) -> str: """simple docstring""" if tokenize_kwargs is None: __magic_name__ = {} if truncation is not None: if "truncation" in tokenize_kwargs: raise ValueError( """truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)""" ) __magic_name__ = truncation __magic_name__ = tokenize_kwargs __magic_name__ = {} if return_tensors is not None: __magic_name__ = return_tensors return preprocess_params, {}, postprocess_params def _lowercase ( self : int , UpperCamelCase__ : int , **UpperCamelCase__ : int ) -> Dict[str, GenericTensor]: """simple docstring""" __magic_name__ = self.framework __magic_name__ = self.tokenizer(UpperCamelCase__ , return_tensors=UpperCamelCase__ , **UpperCamelCase__ ) return model_inputs def _lowercase ( self : str , UpperCamelCase__ : Dict ) -> str: """simple docstring""" __magic_name__ = self.model(**UpperCamelCase__ ) return model_outputs def _lowercase ( self : Union[str, Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : List[str]=False ) -> List[str]: """simple docstring""" if return_tensors: return model_outputs[0] if self.framework == "pt": return model_outputs[0].tolist() elif self.framework == "tf": return model_outputs[0].numpy().tolist() def __call__( self : List[str] , *UpperCamelCase__ : List[Any] , **UpperCamelCase__ : List[Any] ) -> Dict: """simple docstring""" return super().__call__(*UpperCamelCase__ , **UpperCamelCase__ )
76
0
def a__ ( A_, A_ ): '''simple docstring''' _enforce_args(UpperCAmelCase__, UpperCAmelCase__ ) if n == 0: return 0 __magic_name__ = float("""-inf""" ) for i in range(1, n + 1 ): __magic_name__ = max( UpperCAmelCase__, prices[i - 1] + naive_cut_rod_recursive(n - i, UpperCAmelCase__ ) ) return max_revue def a__ ( A_, A_ ): '''simple docstring''' _enforce_args(UpperCAmelCase__, UpperCAmelCase__ ) __magic_name__ = [float("""-inf""" ) for _ in range(n + 1 )] return _top_down_cut_rod_recursive(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ ) def a__ ( A_, A_, A_ ): '''simple docstring''' if max_rev[n] >= 0: return max_rev[n] elif n == 0: return 0 else: __magic_name__ = float("""-inf""" ) for i in range(1, n + 1 ): __magic_name__ = max( UpperCAmelCase__, prices[i - 1] + _top_down_cut_rod_recursive(n - i, UpperCAmelCase__, UpperCAmelCase__ ), ) __magic_name__ = max_revenue return max_rev[n] def a__ ( A_, A_ ): '''simple docstring''' _enforce_args(UpperCAmelCase__, UpperCAmelCase__ ) # length(max_rev) = n + 1, to accommodate for the revenue obtainable from a rod of # length 0. __magic_name__ = [float("""-inf""" ) for _ in range(n + 1 )] __magic_name__ = 0 for i in range(1, n + 1 ): __magic_name__ = max_rev[i] for j in range(1, i + 1 ): __magic_name__ = max(UpperCAmelCase__, prices[j - 1] + max_rev[i - j] ) __magic_name__ = max_revenue_i return max_rev[n] def a__ ( A_, A_ ): '''simple docstring''' if n < 0: __magic_name__ = f'''n must be greater than or equal to 0. Got n = {n}''' raise ValueError(UpperCAmelCase__ ) if n > len(UpperCAmelCase__ ): __magic_name__ = ( """Each integral piece of rod must have a corresponding price. """ f'''Got n = {n} but length of prices = {len(UpperCAmelCase__ )}''' ) raise ValueError(UpperCAmelCase__ ) def a__ ( ): '''simple docstring''' __magic_name__ = [6, 10, 12, 15, 20, 23] __magic_name__ = len(UpperCAmelCase__ ) # the best revenue comes from cutting the rod into 6 pieces, each # of length 1 resulting in a revenue of 6 * 6 = 36. __magic_name__ = 36 __magic_name__ = top_down_cut_rod(UpperCAmelCase__, UpperCAmelCase__ ) __magic_name__ = bottom_up_cut_rod(UpperCAmelCase__, UpperCAmelCase__ ) __magic_name__ = naive_cut_rod_recursive(UpperCAmelCase__, UpperCAmelCase__ ) assert expected_max_revenue == max_rev_top_down assert max_rev_top_down == max_rev_bottom_up assert max_rev_bottom_up == max_rev_naive if __name__ == "__main__": main()
713
import argparse import math import os from copy import deepcopy import torch from audio_diffusion.models import DiffusionAttnUnetaD from diffusion import sampling from torch import nn from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel __lowerCAmelCase : str = { 'gwf-440k': { 'url': 'https://model-server.zqevans2.workers.dev/gwf-440k.ckpt', 'sample_rate': 48000, 'sample_size': 65536, }, 'jmann-small-190k': { 'url': 'https://model-server.zqevans2.workers.dev/jmann-small-190k.ckpt', 'sample_rate': 48000, 'sample_size': 65536, }, 'jmann-large-580k': { 'url': 'https://model-server.zqevans2.workers.dev/jmann-large-580k.ckpt', 'sample_rate': 48000, 'sample_size': 131072, }, 'maestro-uncond-150k': { 'url': 'https://model-server.zqevans2.workers.dev/maestro-uncond-150k.ckpt', 'sample_rate': 16000, 'sample_size': 65536, }, 'unlocked-uncond-250k': { 'url': 'https://model-server.zqevans2.workers.dev/unlocked-uncond-250k.ckpt', 'sample_rate': 16000, 'sample_size': 65536, }, 'honk-140k': { 'url': 'https://model-server.zqevans2.workers.dev/honk-140k.ckpt', 'sample_rate': 16000, 'sample_size': 65536, }, } def a__ ( A_, A_ ): '''simple docstring''' return torch.atana(A_, A_ ) / math.pi * 2 def a__ ( A_ ): '''simple docstring''' __magic_name__ = torch.sin(t * math.pi / 2 ) ** 2 __magic_name__ = (1 - sigma**2) ** 0.5 return alpha_sigma_to_t(A_, A_ ) class UpperCAmelCase_ ( _A ): '''simple docstring''' pass class UpperCAmelCase_ ( nn.Module ): '''simple docstring''' def __init__( self : Tuple , UpperCamelCase__ : str ) -> Optional[Any]: """simple docstring""" super().__init__() __magic_name__ = DiffusionAttnUnetaD(UpperCamelCase__ , n_attn_layers=4 ) __magic_name__ = deepcopy(self.diffusion ) __magic_name__ = torch.quasirandom.SobolEngine(1 , scramble=UpperCamelCase__ ) def a__ ( A_ ): '''simple docstring''' __magic_name__ = MODELS_MAP[model_name]["""url"""] os.system(f'''wget {url} ./''' ) return f'''./{model_name}.ckpt''' __lowerCAmelCase : Optional[int] = { '1': 'resnets.0', '2': 'attentions.0', '3': 'resnets.1', '4': 'attentions.1', '5': 'resnets.2', '6': 'attentions.2', } __lowerCAmelCase : Optional[Any] = { '8': 'resnets.0', '9': 'attentions.0', '10': 'resnets.1', '11': 'attentions.1', '12': 'resnets.2', '13': 'attentions.2', } __lowerCAmelCase : Union[str, Any] = { '1': 'resnets.0', '2': 'attentions.0', '3': 'resnets.1', '4': 'attentions.1', '5': 'resnets.2', '6': 'attentions.2', '8': 'resnets.3', '9': 'attentions.3', '10': 'resnets.4', '11': 'attentions.4', '12': 'resnets.5', '13': 'attentions.5', } __lowerCAmelCase : int = { '0': 'resnets.0', '1': 'resnets.1', '2': 'resnets.2', '4': 'resnets.0', '5': 'resnets.1', '6': 'resnets.2', } __lowerCAmelCase : List[str] = { 'skip': 'conv_skip', 'main.0': 'conv_1', 'main.1': 'group_norm_1', 'main.3': 'conv_2', 'main.4': 'group_norm_2', } __lowerCAmelCase : int = { 'norm': 'group_norm', 'qkv_proj': ['query', 'key', 'value'], 'out_proj': ['proj_attn'], } def a__ ( A_ ): '''simple docstring''' if name.startswith("""skip""" ): return name.replace("""skip""", RES_CONV_MAP["""skip"""] ) # name has to be of format main.{digit} if not name.startswith("""main.""" ): raise ValueError(f'''ResConvBlock error with {name}''' ) return name.replace(name[:6], RES_CONV_MAP[name[:6]] ) def a__ ( A_ ): '''simple docstring''' for key, value in ATTN_MAP.items(): if name.startswith(A_ ) and not isinstance(A_, A_ ): return name.replace(A_, A_ ) elif name.startswith(A_ ): return [name.replace(A_, A_ ) for v in value] raise ValueError(f'''Attn error with {name}''' ) def a__ ( A_, A_=13 ): '''simple docstring''' __magic_name__ = input_string if string.split(""".""" )[0] == "timestep_embed": return string.replace("""timestep_embed""", """time_proj""" ) __magic_name__ = 0 if string.startswith("""net.3.""" ): depth += 1 __magic_name__ = string[6:] elif string.startswith("""net.""" ): __magic_name__ = string[4:] while string.startswith("""main.7.""" ): depth += 1 __magic_name__ = string[7:] if string.startswith("""main.""" ): __magic_name__ = string[5:] # mid block if string[:2].isdigit(): __magic_name__ = string[:2] __magic_name__ = string[2:] else: __magic_name__ = string[0] __magic_name__ = string[1:] if depth == max_depth: __magic_name__ = MID_NUM_TO_LAYER[layer_num] __magic_name__ = """mid_block""" elif depth > 0 and int(A_ ) < 7: __magic_name__ = DOWN_NUM_TO_LAYER[layer_num] __magic_name__ = f'''down_blocks.{depth}''' elif depth > 0 and int(A_ ) > 7: __magic_name__ = UP_NUM_TO_LAYER[layer_num] __magic_name__ = f'''up_blocks.{max_depth - depth - 1}''' elif depth == 0: __magic_name__ = DEPTH_0_TO_LAYER[layer_num] __magic_name__ = f'''up_blocks.{max_depth - 1}''' if int(A_ ) > 3 else """down_blocks.0""" if not string_left.startswith(""".""" ): raise ValueError(f'''Naming error with {input_string} and string_left: {string_left}.''' ) __magic_name__ = string_left[1:] if "resnets" in new_layer: __magic_name__ = convert_resconv_naming(A_ ) elif "attentions" in new_layer: __magic_name__ = convert_attn_naming(A_ ) __magic_name__ = new_string_left if not isinstance(A_, A_ ): __magic_name__ = prefix + """.""" + new_layer + """.""" + string_left else: __magic_name__ = [prefix + """.""" + new_layer + """.""" + s for s in string_left] return new_string def a__ ( A_ ): '''simple docstring''' __magic_name__ = {} for k, v in state_dict.items(): if k.endswith("""kernel""" ): # up- and downsample layers, don't have trainable weights continue __magic_name__ = rename(A_ ) # check if we need to transform from Conv => Linear for attention if isinstance(A_, A_ ): __magic_name__ = transform_conv_attns(A_, A_, A_ ) else: __magic_name__ = v return new_state_dict def a__ ( A_, A_, A_ ): '''simple docstring''' if len(A_ ) == 1: if len(v.shape ) == 3: # weight __magic_name__ = v[:, :, 0] else: # bias __magic_name__ = v else: # qkv matrices __magic_name__ = v.shape[0] __magic_name__ = trippled_shape // 3 for i in range(3 ): if len(v.shape ) == 3: __magic_name__ = v[i * single_shape : (i + 1) * single_shape, :, 0] else: __magic_name__ = v[i * single_shape : (i + 1) * single_shape] return new_state_dict def a__ ( A_ ): '''simple docstring''' __magic_name__ = torch.device("""cuda""" if torch.cuda.is_available() else """cpu""" ) __magic_name__ = args.model_path.split("""/""" )[-1].split(""".""" )[0] if not os.path.isfile(args.model_path ): assert ( model_name == args.model_path ), f'''Make sure to provide one of the official model names {MODELS_MAP.keys()}''' __magic_name__ = download(A_ ) __magic_name__ = MODELS_MAP[model_name]["""sample_rate"""] __magic_name__ = MODELS_MAP[model_name]["""sample_size"""] __magic_name__ = Object() __magic_name__ = sample_size __magic_name__ = sample_rate __magic_name__ = 0 __magic_name__ = UNetaDModel(sample_size=A_, sample_rate=A_ ) __magic_name__ = diffusers_model.state_dict() __magic_name__ = DiffusionUncond(A_ ) orig_model.load_state_dict(torch.load(args.model_path, map_location=A_ )["""state_dict"""] ) __magic_name__ = orig_model.diffusion_ema.eval() __magic_name__ = orig_model.state_dict() __magic_name__ = rename_orig_weights(A_ ) __magic_name__ = set(renamed_state_dict.keys() ) - set(diffusers_state_dict.keys() ) __magic_name__ = set(diffusers_state_dict.keys() ) - set(renamed_state_dict.keys() ) assert len(A_ ) == 0, f'''Problem with {renamed_minus_diffusers}''' assert all(k.endswith("""kernel""" ) for k in list(A_ ) ), f'''Problem with {diffusers_minus_renamed}''' for key, value in renamed_state_dict.items(): assert ( diffusers_state_dict[key].squeeze().shape == value.squeeze().shape ), f'''Shape for {key} doesn\'t match. Diffusers: {diffusers_state_dict[key].shape} vs. {value.shape}''' if key == "time_proj.weight": __magic_name__ = value.squeeze() __magic_name__ = value diffusers_model.load_state_dict(A_ ) __magic_name__ = 100 __magic_name__ = 33 __magic_name__ = IPNDMScheduler(num_train_timesteps=A_ ) __magic_name__ = torch.manual_seed(A_ ) __magic_name__ = torch.randn([1, 2, config.sample_size], generator=A_ ).to(A_ ) __magic_name__ = torch.linspace(1, 0, steps + 1, device=A_ )[:-1] __magic_name__ = get_crash_schedule(A_ ) __magic_name__ = DanceDiffusionPipeline(unet=A_, scheduler=A_ ) __magic_name__ = torch.manual_seed(33 ) __magic_name__ = pipe(num_inference_steps=A_, generator=A_ ).audios __magic_name__ = sampling.iplms_sample(A_, A_, A_, {} ) __magic_name__ = generated.clamp(-1, 1 ) __magic_name__ = (generated - audio).abs().sum() __magic_name__ = (generated - audio).abs().max() if args.save: pipe.save_pretrained(args.checkpoint_path ) print("""Diff sum""", A_ ) print("""Diff max""", A_ ) assert diff_max < 1e-3, f'''Diff max: {diff_max} is too much :-/''' print(f'''Conversion for {model_name} successful!''' ) if __name__ == "__main__": __lowerCAmelCase : Optional[int] = argparse.ArgumentParser() parser.add_argument('--model_path', default=None, type=str, required=True, help='Path to the model to convert.') parser.add_argument( '--save', default=True, type=bool, required=False, help='Whether to save the converted model or not.' ) parser.add_argument('--checkpoint_path', default=None, type=str, required=True, help='Path to the output model.') __lowerCAmelCase : Union[str, Any] = parser.parse_args() main(args)
76
0
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available __lowerCAmelCase : int = { 'configuration_bridgetower': [ 'BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BridgeTowerConfig', 'BridgeTowerTextConfig', 'BridgeTowerVisionConfig', ], 'processing_bridgetower': ['BridgeTowerProcessor'], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCAmelCase : Union[str, Any] = ['BridgeTowerImageProcessor'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCAmelCase : int = [ 'BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST', 'BridgeTowerForContrastiveLearning', 'BridgeTowerForImageAndTextRetrieval', 'BridgeTowerForMaskedLM', 'BridgeTowerModel', 'BridgeTowerPreTrainedModel', ] if TYPE_CHECKING: from .configuration_bridgetower import ( BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP, BridgeTowerConfig, BridgeTowerTextConfig, BridgeTowerVisionConfig, ) from .processing_bridgetower import BridgeTowerProcessor try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .image_processing_bridgetower import BridgeTowerImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_bridgetower import ( BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST, BridgeTowerForContrastiveLearning, BridgeTowerForImageAndTextRetrieval, BridgeTowerForMaskedLM, BridgeTowerModel, BridgeTowerPreTrainedModel, ) else: import sys __lowerCAmelCase : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure)
714
from ...configuration_utils import PretrainedConfig from ...utils import logging __lowerCAmelCase : Tuple = logging.get_logger(__name__) __lowerCAmelCase : Tuple = { 'SCUT-DLVCLab/lilt-roberta-en-base': ( 'https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json' ), } class UpperCAmelCase_ ( _A ): '''simple docstring''' a__ = """lilt""" def __init__( self : Dict , UpperCamelCase__ : List[str]=3_0522 , UpperCamelCase__ : Optional[Any]=768 , UpperCamelCase__ : Dict=12 , UpperCamelCase__ : Optional[Any]=12 , UpperCamelCase__ : Dict=3072 , UpperCamelCase__ : Tuple="gelu" , UpperCamelCase__ : int=0.1 , UpperCamelCase__ : Dict=0.1 , UpperCamelCase__ : Union[str, Any]=512 , UpperCamelCase__ : Dict=2 , UpperCamelCase__ : Dict=0.02 , UpperCamelCase__ : Any=1E-12 , UpperCamelCase__ : Optional[int]=0 , UpperCamelCase__ : str="absolute" , UpperCamelCase__ : Any=None , UpperCamelCase__ : Dict=4 , UpperCamelCase__ : Tuple=1024 , **UpperCamelCase__ : Optional[int] , ) -> Dict: """simple docstring""" super().__init__(pad_token_id=UpperCamelCase__ , **UpperCamelCase__ ) __magic_name__ = vocab_size __magic_name__ = hidden_size __magic_name__ = num_hidden_layers __magic_name__ = num_attention_heads __magic_name__ = hidden_act __magic_name__ = intermediate_size __magic_name__ = hidden_dropout_prob __magic_name__ = attention_probs_dropout_prob __magic_name__ = max_position_embeddings __magic_name__ = type_vocab_size __magic_name__ = initializer_range __magic_name__ = layer_norm_eps __magic_name__ = position_embedding_type __magic_name__ = classifier_dropout __magic_name__ = channel_shrink_ratio __magic_name__ = max_ad_position_embeddings
76
0
from ...configuration_utils import PretrainedConfig from ...utils import logging __lowerCAmelCase : Union[str, Any] = logging.get_logger(__name__) __lowerCAmelCase : int = { 'tiiuae/falcon-40b': 'https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json', 'tiiuae/falcon-7b': 'https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json', } class UpperCAmelCase_ ( _A ): '''simple docstring''' a__ = """falcon""" a__ = ["""past_key_values"""] def __init__( self : int , UpperCamelCase__ : List[Any]=6_5024 , UpperCamelCase__ : str=4544 , UpperCamelCase__ : Union[str, Any]=32 , UpperCamelCase__ : Tuple=71 , UpperCamelCase__ : List[str]=1E-5 , UpperCamelCase__ : List[str]=0.02 , UpperCamelCase__ : Union[str, Any]=True , UpperCamelCase__ : List[str]=0.0 , UpperCamelCase__ : Tuple=0.0 , UpperCamelCase__ : List[Any]=None , UpperCamelCase__ : Dict=False , UpperCamelCase__ : List[str]=False , UpperCamelCase__ : Any=True , UpperCamelCase__ : List[Any]=True , UpperCamelCase__ : Dict=False , UpperCamelCase__ : Union[str, Any]=11 , UpperCamelCase__ : int=11 , **UpperCamelCase__ : Optional[int] , ) -> Optional[int]: """simple docstring""" __magic_name__ = vocab_size # Backward compatibility with n_embed kwarg __magic_name__ = kwargs.pop("""n_embed""" , UpperCamelCase__ ) __magic_name__ = hidden_size if n_embed is None else n_embed __magic_name__ = num_hidden_layers __magic_name__ = num_attention_heads __magic_name__ = layer_norm_epsilon __magic_name__ = initializer_range __magic_name__ = use_cache __magic_name__ = hidden_dropout __magic_name__ = attention_dropout __magic_name__ = bos_token_id __magic_name__ = eos_token_id __magic_name__ = num_attention_heads if num_kv_heads is None else num_kv_heads __magic_name__ = alibi __magic_name__ = new_decoder_architecture __magic_name__ = multi_query # Ignored when new_decoder_architecture is True __magic_name__ = parallel_attn __magic_name__ = bias super().__init__(bos_token_id=UpperCamelCase__ , eos_token_id=UpperCamelCase__ , **UpperCamelCase__ ) @property def _lowercase ( self : List[str] ) -> Dict: """simple docstring""" return self.hidden_size // self.num_attention_heads @property def _lowercase ( self : List[str] ) -> List[Any]: """simple docstring""" return not self.alibi
715
import json import os import tempfile from transformers.testing_utils import check_json_file_has_correct_format class UpperCAmelCase_ : '''simple docstring''' a__ = None def _lowercase ( self : Optional[int] ) -> str: """simple docstring""" __magic_name__ = self.feature_extraction_class(**self.feat_extract_dict ) __magic_name__ = json.loads(feat_extract.to_json_string() ) for key, value in self.feat_extract_dict.items(): self.assertEqual(obj[key] , UpperCamelCase__ ) def _lowercase ( self : Union[str, Any] ) -> str: """simple docstring""" __magic_name__ = self.feature_extraction_class(**self.feat_extract_dict ) with tempfile.TemporaryDirectory() as tmpdirname: __magic_name__ = os.path.join(UpperCamelCase__ , """feat_extract.json""" ) feat_extract_first.to_json_file(UpperCamelCase__ ) __magic_name__ = self.feature_extraction_class.from_json_file(UpperCamelCase__ ) self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() ) def _lowercase ( self : str ) -> str: """simple docstring""" __magic_name__ = self.feature_extraction_class(**self.feat_extract_dict ) with tempfile.TemporaryDirectory() as tmpdirname: __magic_name__ = feat_extract_first.save_pretrained(UpperCamelCase__ )[0] check_json_file_has_correct_format(UpperCamelCase__ ) __magic_name__ = self.feature_extraction_class.from_pretrained(UpperCamelCase__ ) self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() ) def _lowercase ( self : Optional[int] ) -> Tuple: """simple docstring""" __magic_name__ = self.feature_extraction_class() self.assertIsNotNone(UpperCamelCase__ )
76
0
import argparse import math import os import torch from neural_compressor.utils.pytorch import load from PIL import Image from transformers import CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, StableDiffusionPipeline, UNetaDConditionModel def a__ ( ): '''simple docstring''' __magic_name__ = argparse.ArgumentParser() parser.add_argument( """-m""", """--pretrained_model_name_or_path""", type=A_, default=A_, required=A_, help="""Path to pretrained model or model identifier from huggingface.co/models.""", ) parser.add_argument( """-c""", """--caption""", type=A_, default="""robotic cat with wings""", help="""Text used to generate images.""", ) parser.add_argument( """-n""", """--images_num""", type=A_, default=4, help="""How much images to generate.""", ) parser.add_argument( """-s""", """--seed""", type=A_, default=42, help="""Seed for random process.""", ) parser.add_argument( """-ci""", """--cuda_id""", type=A_, default=0, help="""cuda_id.""", ) __magic_name__ = parser.parse_args() return args def a__ ( A_, A_, A_ ): '''simple docstring''' if not len(A_ ) == rows * cols: raise ValueError("""The specified number of rows and columns are not correct.""" ) __magic_name__ , __magic_name__ = imgs[0].size __magic_name__ = Image.new("""RGB""", size=(cols * w, rows * h) ) __magic_name__ , __magic_name__ = grid.size for i, img in enumerate(A_ ): grid.paste(A_, box=(i % cols * w, i // cols * h) ) return grid def a__ ( A_, A_="robotic cat with wings", A_=7.5, A_=50, A_=1, A_=42, ): '''simple docstring''' __magic_name__ = torch.Generator(pipeline.device ).manual_seed(A_ ) __magic_name__ = pipeline( A_, guidance_scale=A_, num_inference_steps=A_, generator=A_, num_images_per_prompt=A_, ).images __magic_name__ = int(math.sqrt(A_ ) ) __magic_name__ = image_grid(A_, rows=_rows, cols=num_images_per_prompt // _rows ) return grid, images __lowerCAmelCase : List[str] = parse_args() # Load models and create wrapper for stable diffusion __lowerCAmelCase : Tuple = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder='tokenizer') __lowerCAmelCase : Optional[Any] = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder='text_encoder') __lowerCAmelCase : Optional[int] = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder='vae') __lowerCAmelCase : Any = UNetaDConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder='unet') __lowerCAmelCase : str = StableDiffusionPipeline.from_pretrained( args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer ) __lowerCAmelCase : Any = lambda images, clip_input: (images, False) if os.path.exists(os.path.join(args.pretrained_model_name_or_path, 'best_model.pt')): __lowerCAmelCase : int = load(args.pretrained_model_name_or_path, model=unet) unet.eval() setattr(pipeline, 'unet', unet) else: __lowerCAmelCase : str = unet.to(torch.device('cuda', args.cuda_id)) __lowerCAmelCase : Optional[int] = pipeline.to(unet.device) __lowerCAmelCase : List[Any] = generate_images(pipeline, prompt=args.caption, num_images_per_prompt=args.images_num, seed=args.seed) grid.save(os.path.join(args.pretrained_model_name_or_path, '{}.png'.format('_'.join(args.caption.split())))) __lowerCAmelCase : List[Any] = os.path.join(args.pretrained_model_name_or_path, '_'.join(args.caption.split())) os.makedirs(dirname, exist_ok=True) for idx, image in enumerate(images): image.save(os.path.join(dirname, '{}.png'.format(idx + 1)))
716
from ..utils import DummyObject, requires_backends class UpperCAmelCase_ ( metaclass=_A ): '''simple docstring''' a__ = ["""note_seq"""] def __init__( self : Any , *UpperCamelCase__ : str , **UpperCamelCase__ : List[Any] ) -> Optional[int]: """simple docstring""" requires_backends(self , ["""note_seq"""] ) @classmethod def _lowercase ( cls : str , *UpperCamelCase__ : Dict , **UpperCamelCase__ : Tuple ) -> Dict: """simple docstring""" requires_backends(cls , ["""note_seq"""] ) @classmethod def _lowercase ( cls : List[str] , *UpperCamelCase__ : Optional[Any] , **UpperCamelCase__ : Tuple ) -> int: """simple docstring""" requires_backends(cls , ["""note_seq"""] )
76
0
import argparse import os import re import packaging.version __lowerCAmelCase : Any = "examples/" __lowerCAmelCase : Tuple = { "examples": (re.compile(R'^check_min_version\(\"[^\"]+\"\)\s*$', re.MULTILINE), "check_min_version(\"VERSION\")\n"), "init": (re.compile(R'^__version__\s+=\s+\"([^\"]+)\"\s*$', re.MULTILINE), "__version__ = \"VERSION\"\n"), "setup": (re.compile(R'^(\s*)version\s*=\s*\"[^\"]+\",', re.MULTILINE), r"\1version=\"VERSION\","), "doc": (re.compile(R'^(\s*)release\s*=\s*\"[^\"]+\"$', re.MULTILINE), "release = \"VERSION\"\n"), } __lowerCAmelCase : List[str] = { "init": "src/diffusers/__init__.py", "setup": "setup.py", } __lowerCAmelCase : Dict = "README.md" def a__ ( A_, A_, A_ ) -> int: '''simple docstring''' with open(A_, """r""", encoding="""utf-8""", newline="""\n""" ) as f: __magic_name__ = f.read() __magic_name__ = REPLACE_PATTERNS[pattern] __magic_name__ = replace.replace("""VERSION""", A_ ) __magic_name__ = re_pattern.sub(A_, A_ ) with open(A_, """w""", encoding="""utf-8""", newline="""\n""" ) as f: f.write(A_ ) def a__ ( A_ ) -> Any: '''simple docstring''' for folder, directories, fnames in os.walk(A_ ): # Removing some of the folders with non-actively maintained examples from the walk if "research_projects" in directories: directories.remove("""research_projects""" ) if "legacy" in directories: directories.remove("""legacy""" ) for fname in fnames: if fname.endswith(""".py""" ): update_version_in_file(os.path.join(A_, A_ ), A_, pattern="""examples""" ) def a__ ( A_, A_=False ) -> List[str]: '''simple docstring''' for pattern, fname in REPLACE_FILES.items(): update_version_in_file(A_, A_, A_ ) if not patch: update_version_in_examples(A_ ) def a__ ( ) -> List[str]: '''simple docstring''' __magic_name__ = '''🤗 Transformers currently provides the following architectures''' __magic_name__ = '''1. Want to contribute a new model?''' with open(A_, """r""", encoding="""utf-8""", newline="""\n""" ) as f: __magic_name__ = f.readlines() # Find the start of the list. __magic_name__ = 0 while not lines[start_index].startswith(_start_prompt ): start_index += 1 start_index += 1 __magic_name__ = start_index # Update the lines in the model list. while not lines[index].startswith(_end_prompt ): if lines[index].startswith("""1.""" ): __magic_name__ = lines[index].replace( """https://huggingface.co/docs/diffusers/main/model_doc""", """https://huggingface.co/docs/diffusers/model_doc""", ) index += 1 with open(A_, """w""", encoding="""utf-8""", newline="""\n""" ) as f: f.writelines(A_ ) def a__ ( ) -> Dict: '''simple docstring''' with open(REPLACE_FILES["""init"""], """r""" ) as f: __magic_name__ = f.read() __magic_name__ = REPLACE_PATTERNS['''init'''][0].search(A_ ).groups()[0] return packaging.version.parse(A_ ) def a__ ( A_=False ) -> Optional[int]: '''simple docstring''' __magic_name__ = get_version() if patch and default_version.is_devrelease: raise ValueError("""Can\'t create a patch version from the dev branch, checkout a released version!""" ) if default_version.is_devrelease: __magic_name__ = default_version.base_version elif patch: __magic_name__ = f'''{default_version.major}.{default_version.minor}.{default_version.micro + 1}''' else: __magic_name__ = f'''{default_version.major}.{default_version.minor + 1}.0''' # Now let's ask nicely if that's the right one. __magic_name__ = input(f'''Which version are you releasing? [{default_version}]''' ) if len(A_ ) == 0: __magic_name__ = default_version print(f'''Updating version to {version}.''' ) global_version_update(A_, patch=A_ ) def a__ ( ) -> str: '''simple docstring''' __magic_name__ = get_version() __magic_name__ = f'''{current_version.major}.{current_version.minor + 1}.0.dev0''' __magic_name__ = current_version.base_version # Check with the user we got that right. __magic_name__ = input(f'''Which version are we developing now? [{dev_version}]''' ) if len(A_ ) == 0: __magic_name__ = dev_version print(f'''Updating version to {version}.''' ) global_version_update(A_ ) # print("Cleaning main README, don't forget to run `make fix-copies`.") # clean_main_ref_in_model_list() if __name__ == "__main__": __lowerCAmelCase : List[str] = argparse.ArgumentParser() parser.add_argument('--post_release', action='store_true', help='Whether this is pre or post release.') parser.add_argument('--patch', action='store_true', help='Whether or not this is a patch release.') __lowerCAmelCase : Any = parser.parse_args() if not args.post_release: pre_release_work(patch=args.patch) elif args.patch: print('Nothing to do after a patch :-)') else: post_release_work()
717
def a__ ( A_ ): '''simple docstring''' return " ".join( """""".join(word[::-1] ) if len(A_ ) > 4 else word for word in sentence.split() ) if __name__ == "__main__": import doctest doctest.testmod() print(reverse_long_words('Hey wollef sroirraw'))
76
0
from collections.abc import Sequence from queue import Queue class UpperCAmelCase_ : '''simple docstring''' def __init__( self : List[Any] , UpperCamelCase__ : str , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Dict , UpperCamelCase__ : Tuple=None , UpperCamelCase__ : Dict=None ) -> Dict: """simple docstring""" __magic_name__ = start __magic_name__ = end __magic_name__ = val __magic_name__ = (start + end) // 2 __magic_name__ = left __magic_name__ = right def __repr__( self : Optional[Any] ) -> List[Any]: """simple docstring""" return F'''SegmentTreeNode(start={self.start}, end={self.end}, val={self.val})''' class UpperCAmelCase_ : '''simple docstring''' def __init__( self : Union[str, Any] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : int ) -> Dict: """simple docstring""" __magic_name__ = collection __magic_name__ = function if self.collection: __magic_name__ = self._build_tree(0 , len(UpperCamelCase__ ) - 1 ) def _lowercase ( self : Any , UpperCamelCase__ : str , UpperCamelCase__ : Union[str, Any] ) -> Union[str, Any]: """simple docstring""" self._update_tree(self.root , UpperCamelCase__ , UpperCamelCase__ ) def _lowercase ( self : List[str] , UpperCamelCase__ : str , UpperCamelCase__ : str ) -> List[Any]: """simple docstring""" return self._query_range(self.root , UpperCamelCase__ , UpperCamelCase__ ) def _lowercase ( self : int , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Any ) -> str: """simple docstring""" if start == end: return SegmentTreeNode(UpperCamelCase__ , UpperCamelCase__ , self.collection[start] ) __magic_name__ = (start + end) // 2 __magic_name__ = self._build_tree(UpperCamelCase__ , UpperCamelCase__ ) __magic_name__ = self._build_tree(mid + 1 , UpperCamelCase__ ) return SegmentTreeNode(UpperCamelCase__ , UpperCamelCase__ , self.fn(left.val , right.val ) , UpperCamelCase__ , UpperCamelCase__ ) def _lowercase ( self : Any , UpperCamelCase__ : int , UpperCamelCase__ : Tuple , UpperCamelCase__ : str ) -> List[Any]: """simple docstring""" if node.start == i and node.end == i: __magic_name__ = val return if i <= node.mid: self._update_tree(node.left , UpperCamelCase__ , UpperCamelCase__ ) else: self._update_tree(node.right , UpperCamelCase__ , UpperCamelCase__ ) __magic_name__ = self.fn(node.left.val , node.right.val ) def _lowercase ( self : int , UpperCamelCase__ : Tuple , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : List[Any] ) -> Optional[Any]: """simple docstring""" if node.start == i and node.end == j: return node.val if i <= node.mid: if j <= node.mid: # range in left child tree return self._query_range(node.left , UpperCamelCase__ , UpperCamelCase__ ) else: # range in left child tree and right child tree return self.fn( self._query_range(node.left , UpperCamelCase__ , node.mid ) , self._query_range(node.right , node.mid + 1 , UpperCamelCase__ ) , ) else: # range in right child tree return self._query_range(node.right , UpperCamelCase__ , UpperCamelCase__ ) def _lowercase ( self : Tuple ) -> Optional[int]: """simple docstring""" if self.root is not None: __magic_name__ = Queue() queue.put(self.root ) while not queue.empty(): __magic_name__ = queue.get() yield node if node.left is not None: queue.put(node.left ) if node.right is not None: queue.put(node.right ) if __name__ == "__main__": import operator for fn in [operator.add, max, min]: print('*' * 50) __lowerCAmelCase : str = SegmentTree([2, 1, 5, 3, 4], fn) for node in arr.traverse(): print(node) print() arr.update(1, 5) for node in arr.traverse(): print(node) print() print(arr.query_range(3, 4)) # 7 print(arr.query_range(2, 2)) # 5 print(arr.query_range(1, 3)) # 13 print()
718
import os import unittest from transformers import FunnelTokenizer, FunnelTokenizerFast from transformers.models.funnel.tokenization_funnel import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class UpperCAmelCase_ ( _A , unittest.TestCase ): '''simple docstring''' a__ = FunnelTokenizer a__ = FunnelTokenizerFast a__ = True a__ = True def _lowercase ( self : List[Any] ) -> str: """simple docstring""" super().setUp() __magic_name__ = [ """<unk>""", """<cls>""", """<sep>""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing""", """,""", """low""", """lowest""", ] __magic_name__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] ) with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer: vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) ) def _lowercase ( self : Dict , **UpperCamelCase__ : Tuple ) -> Union[str, Any]: """simple docstring""" return FunnelTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase__ ) def _lowercase ( self : str , **UpperCamelCase__ : str ) -> List[str]: """simple docstring""" return FunnelTokenizerFast.from_pretrained(self.tmpdirname , **UpperCamelCase__ ) def _lowercase ( self : List[str] , UpperCamelCase__ : str ) -> List[Any]: """simple docstring""" __magic_name__ = """UNwant\u00E9d,running""" __magic_name__ = """unwanted, running""" return input_text, output_text def _lowercase ( self : Union[str, Any] ) -> Tuple: """simple docstring""" __magic_name__ = self.tokenizer_class(self.vocab_file ) __magic_name__ = tokenizer.tokenize("""UNwant\u00E9d,running""" ) self.assertListEqual(UpperCamelCase__ , ["""un""", """##want""", """##ed""", """,""", """runn""", """##ing"""] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase__ ) , [7, 4, 5, 10, 8, 9] ) def _lowercase ( self : str ) -> List[Any]: """simple docstring""" __magic_name__ = self.get_tokenizers(do_lower_case=UpperCamelCase__ ) for tokenizer in tokenizers: __magic_name__ = tokenizer("""UNwant\u00E9d,running""" ) __magic_name__ = len(inputs["""input_ids"""] ) - 1 self.assertListEqual(inputs["""token_type_ids"""] , [2] + [0] * sentence_len ) __magic_name__ = tokenizer("""UNwant\u00E9d,running""" , """UNwant\u00E9d,running""" ) self.assertListEqual(inputs["""token_type_ids"""] , [2] + [0] * sentence_len + [1] * sentence_len )
76
0
from __future__ import annotations import math from collections import Counter from string import ascii_lowercase def a__ ( A_ ): '''simple docstring''' __magic_name__ , __magic_name__ = analyze_text(UpperCamelCase__ ) __magic_name__ = list(""" """ + ascii_lowercase ) # what is our total sum of probabilities. __magic_name__ = sum(single_char_strings.values() ) # one length string __magic_name__ = 0 # for each alpha we go in our dict and if it is in it we calculate entropy for ch in my_alphas: if ch in single_char_strings: __magic_name__ = single_char_strings[ch] __magic_name__ = my_str / all_sum my_fir_sum += prob * math.loga(UpperCamelCase__ ) # entropy formula. # print entropy print(f'''{round(-1 * my_fir_sum ):.1f}''' ) # two len string __magic_name__ = sum(two_char_strings.values() ) __magic_name__ = 0 # for each alpha (two in size) calculate entropy. for cha in my_alphas: for cha in my_alphas: __magic_name__ = cha + cha if sequence in two_char_strings: __magic_name__ = two_char_strings[sequence] __magic_name__ = int(UpperCamelCase__ ) / all_sum my_sec_sum += prob * math.loga(UpperCamelCase__ ) # print second entropy print(f'''{round(-1 * my_sec_sum ):.1f}''' ) # print the difference between them print(f'''{round((-1 * my_sec_sum) - (-1 * my_fir_sum) ):.1f}''' ) def a__ ( A_ ): '''simple docstring''' __magic_name__ = Counter() # type: ignore __magic_name__ = Counter() # type: ignore single_char_strings[text[-1]] += 1 # first case when we have space at start. two_char_strings[" " + text[0]] += 1 for i in range(0, len(UpperCamelCase__ ) - 1 ): single_char_strings[text[i]] += 1 two_char_strings[text[i : i + 2]] += 1 return single_char_strings, two_char_strings def a__ ( ): '''simple docstring''' import doctest doctest.testmod() # text = ( # "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark " # "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest " # "jointure saw horrible. He private he on be imagine suppose. Fertile " # "beloved evident through no service elderly is. Blind there if every no so " # "at. Own neglected you preferred way sincerity delivered his attempted. To " # "of message cottage windows do besides against uncivil. Delightful " # "unreserved impossible few estimating men favourable see entreaties. She " # "propriety immediate was improving. He or entrance humoured likewise " # "moderate. Much nor game son say feel. Fat make met can must form into " # "gate. Me we offending prevailed discovery. " # ) # calculate_prob(text) if __name__ == "__main__": main()
719
from collections import deque from .hash_table import HashTable class UpperCAmelCase_ ( _A ): '''simple docstring''' def __init__( self : int , *UpperCamelCase__ : Union[str, Any] , **UpperCamelCase__ : Optional[Any] ) -> Optional[Any]: """simple docstring""" super().__init__(*UpperCamelCase__ , **UpperCamelCase__ ) def _lowercase ( self : Optional[Any] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Any ) -> Dict: """simple docstring""" __magic_name__ = deque([] ) if self.values[key] is None else self.values[key] self.values[key].appendleft(UpperCamelCase__ ) __magic_name__ = self.values[key] def _lowercase ( self : List[str] ) -> int: """simple docstring""" return ( sum(self.charge_factor - len(UpperCamelCase__ ) for slot in self.values ) / self.size_table * self.charge_factor ) def _lowercase ( self : Optional[Any] , UpperCamelCase__ : str , UpperCamelCase__ : Tuple=None ) -> str: """simple docstring""" if not ( len(self.values[key] ) == self.charge_factor and self.values.count(UpperCamelCase__ ) == 0 ): return key return super()._collision_resolution(UpperCamelCase__ , UpperCamelCase__ )
76
0
def a__ ( A_, A_ ): '''simple docstring''' if density <= 0: raise ValueError("""Impossible fluid density""" ) if bulk_modulus <= 0: raise ValueError("""Impossible bulk modulus""" ) return (bulk_modulus / density) ** 0.5 if __name__ == "__main__": import doctest doctest.testmod()
720
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import os from ...utils.constants import SAGEMAKER_PARALLEL_EC2_INSTANCES, TORCH_DYNAMO_MODES from ...utils.dataclasses import ComputeEnvironment, SageMakerDistributedType from ...utils.imports import is_botoa_available from .config_args import SageMakerConfig from .config_utils import ( DYNAMO_BACKENDS, _ask_field, _ask_options, _convert_dynamo_backend, _convert_mixed_precision, _convert_sagemaker_distributed_mode, _convert_yes_no_to_bool, ) if is_botoa_available(): import botoa # noqa: F401 def a__ ( A_ ): '''simple docstring''' __magic_name__ = botoa.client("""iam""" ) __magic_name__ = { """Version""": """2012-10-17""", """Statement""": [ {"""Effect""": """Allow""", """Principal""": {"""Service""": """sagemaker.amazonaws.com"""}, """Action""": """sts:AssumeRole"""} ], } try: # create the role, associated with the chosen trust policy iam_client.create_role( RoleName=A_, AssumeRolePolicyDocument=json.dumps(A_, indent=2 ) ) __magic_name__ = { """Version""": """2012-10-17""", """Statement""": [ { """Effect""": """Allow""", """Action""": [ """sagemaker:*""", """ecr:GetDownloadUrlForLayer""", """ecr:BatchGetImage""", """ecr:BatchCheckLayerAvailability""", """ecr:GetAuthorizationToken""", """cloudwatch:PutMetricData""", """cloudwatch:GetMetricData""", """cloudwatch:GetMetricStatistics""", """cloudwatch:ListMetrics""", """logs:CreateLogGroup""", """logs:CreateLogStream""", """logs:DescribeLogStreams""", """logs:PutLogEvents""", """logs:GetLogEvents""", """s3:CreateBucket""", """s3:ListBucket""", """s3:GetBucketLocation""", """s3:GetObject""", """s3:PutObject""", ], """Resource""": """*""", } ], } # attach policy to role iam_client.put_role_policy( RoleName=A_, PolicyName=f'''{role_name}_policy_permission''', PolicyDocument=json.dumps(A_, indent=2 ), ) except iam_client.exceptions.EntityAlreadyExistsException: print(f'''role {role_name} already exists. Using existing one''' ) def a__ ( A_ ): '''simple docstring''' __magic_name__ = botoa.client("""iam""" ) return iam_client.get_role(RoleName=A_ )["Role"]["Arn"] def a__ ( ): '''simple docstring''' __magic_name__ = _ask_options( """How do you want to authorize?""", ["""AWS Profile""", """Credentials (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY) """], A_, ) __magic_name__ = None if credentials_configuration == 0: __magic_name__ = _ask_field("""Enter your AWS Profile name: [default] """, default="""default""" ) __magic_name__ = aws_profile else: print( """Note you will need to provide AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY when you launch you training script with,""" """`accelerate launch --aws_access_key_id XXX --aws_secret_access_key YYY`""" ) __magic_name__ = _ask_field("""AWS Access Key ID: """ ) __magic_name__ = aws_access_key_id __magic_name__ = _ask_field("""AWS Secret Access Key: """ ) __magic_name__ = aws_secret_access_key __magic_name__ = _ask_field("""Enter your AWS Region: [us-east-1]""", default="""us-east-1""" ) __magic_name__ = aws_region __magic_name__ = _ask_options( """Do you already have an IAM Role for executing Amazon SageMaker Training Jobs?""", ["""Provide IAM Role name""", """Create new IAM role using credentials"""], A_, ) if role_management == 0: __magic_name__ = _ask_field("""Enter your IAM role name: """ ) else: __magic_name__ = """accelerate_sagemaker_execution_role""" print(f'''Accelerate will create an iam role "{iam_role_name}" using the provided credentials''' ) _create_iam_role_for_sagemaker(A_ ) __magic_name__ = _ask_field( """Do you want to use custom Docker image? [yes/NO]: """, _convert_yes_no_to_bool, default=A_, error_message="""Please enter yes or no.""", ) __magic_name__ = None if is_custom_docker_image: __magic_name__ = _ask_field("""Enter your Docker image: """, lambda A_ : str(A_ ).lower() ) __magic_name__ = _ask_field( """Do you want to provide SageMaker input channels with data locations? [yes/NO]: """, _convert_yes_no_to_bool, default=A_, error_message="""Please enter yes or no.""", ) __magic_name__ = None if is_sagemaker_inputs_enabled: __magic_name__ = _ask_field( """Enter the path to the SageMaker inputs TSV file with columns (channel_name, data_location): """, lambda A_ : str(A_ ).lower(), ) __magic_name__ = _ask_field( """Do you want to enable SageMaker metrics? [yes/NO]: """, _convert_yes_no_to_bool, default=A_, error_message="""Please enter yes or no.""", ) __magic_name__ = None if is_sagemaker_metrics_enabled: __magic_name__ = _ask_field( """Enter the path to the SageMaker metrics TSV file with columns (metric_name, metric_regex): """, lambda A_ : str(A_ ).lower(), ) __magic_name__ = _ask_options( """What is the distributed mode?""", ["""No distributed training""", """Data parallelism"""], _convert_sagemaker_distributed_mode, ) __magic_name__ = {} __magic_name__ = _ask_field( """Do you wish to optimize your script with torch dynamo?[yes/NO]:""", _convert_yes_no_to_bool, default=A_, error_message="""Please enter yes or no.""", ) if use_dynamo: __magic_name__ = """dynamo_""" __magic_name__ = _ask_options( """Which dynamo backend would you like to use?""", [x.lower() for x in DYNAMO_BACKENDS], _convert_dynamo_backend, default=2, ) __magic_name__ = _ask_field( """Do you want to customize the defaults sent to torch.compile? [yes/NO]: """, _convert_yes_no_to_bool, default=A_, error_message="""Please enter yes or no.""", ) if use_custom_options: __magic_name__ = _ask_options( """Which mode do you want to use?""", A_, lambda A_ : TORCH_DYNAMO_MODES[int(A_ )], default="""default""", ) __magic_name__ = _ask_field( """Do you want the fullgraph mode or it is ok to break model into several subgraphs? [yes/NO]: """, _convert_yes_no_to_bool, default=A_, error_message="""Please enter yes or no.""", ) __magic_name__ = _ask_field( """Do you want to enable dynamic shape tracing? [yes/NO]: """, _convert_yes_no_to_bool, default=A_, error_message="""Please enter yes or no.""", ) __magic_name__ = """Which EC2 instance type you want to use for your training?""" if distributed_type != SageMakerDistributedType.NO: __magic_name__ = _ask_options( A_, A_, lambda A_ : SAGEMAKER_PARALLEL_EC2_INSTANCES[int(A_ )] ) else: eca_instance_query += "? [ml.p3.2xlarge]:" __magic_name__ = _ask_field(A_, lambda A_ : str(A_ ).lower(), default="""ml.p3.2xlarge""" ) __magic_name__ = 1 if distributed_type in (SageMakerDistributedType.DATA_PARALLEL, SageMakerDistributedType.MODEL_PARALLEL): __magic_name__ = _ask_field( """How many machines do you want use? [1]: """, A_, default=1, ) __magic_name__ = _ask_options( """Do you wish to use FP16 or BF16 (mixed precision)?""", ["""no""", """fp16""", """bf16""", """fp8"""], _convert_mixed_precision, ) if use_dynamo and mixed_precision == "no": print( """Torch dynamo used without mixed precision requires TF32 to be efficient. Accelerate will enable it by default when launching your scripts.""" ) return SageMakerConfig( image_uri=A_, compute_environment=ComputeEnvironment.AMAZON_SAGEMAKER, distributed_type=A_, use_cpu=A_, dynamo_config=A_, eca_instance_type=A_, profile=A_, region=A_, iam_role_name=A_, mixed_precision=A_, num_machines=A_, sagemaker_inputs_file=A_, sagemaker_metrics_file=A_, )
76
0
import importlib import json import os from collections import OrderedDict from typing import Dict, Optional, Union # Build the list of all feature extractors from ...configuration_utils import PretrainedConfig from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code from ...feature_extraction_utils import FeatureExtractionMixin from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging from .auto_factory import _LazyAutoMapping from .configuration_auto import ( CONFIG_MAPPING_NAMES, AutoConfig, model_type_to_module_name, replace_list_option_in_docstrings, ) __lowerCAmelCase : Union[str, Any] = logging.get_logger(__name__) __lowerCAmelCase : Optional[int] = OrderedDict( [ ('audio-spectrogram-transformer', 'ASTFeatureExtractor'), ('beit', 'BeitFeatureExtractor'), ('chinese_clip', 'ChineseCLIPFeatureExtractor'), ('clap', 'ClapFeatureExtractor'), ('clip', 'CLIPFeatureExtractor'), ('clipseg', 'ViTFeatureExtractor'), ('conditional_detr', 'ConditionalDetrFeatureExtractor'), ('convnext', 'ConvNextFeatureExtractor'), ('cvt', 'ConvNextFeatureExtractor'), ('data2vec-audio', 'Wav2Vec2FeatureExtractor'), ('data2vec-vision', 'BeitFeatureExtractor'), ('deformable_detr', 'DeformableDetrFeatureExtractor'), ('deit', 'DeiTFeatureExtractor'), ('detr', 'DetrFeatureExtractor'), ('dinat', 'ViTFeatureExtractor'), ('donut-swin', 'DonutFeatureExtractor'), ('dpt', 'DPTFeatureExtractor'), ('encodec', 'EncodecFeatureExtractor'), ('flava', 'FlavaFeatureExtractor'), ('glpn', 'GLPNFeatureExtractor'), ('groupvit', 'CLIPFeatureExtractor'), ('hubert', 'Wav2Vec2FeatureExtractor'), ('imagegpt', 'ImageGPTFeatureExtractor'), ('layoutlmv2', 'LayoutLMv2FeatureExtractor'), ('layoutlmv3', 'LayoutLMv3FeatureExtractor'), ('levit', 'LevitFeatureExtractor'), ('maskformer', 'MaskFormerFeatureExtractor'), ('mctct', 'MCTCTFeatureExtractor'), ('mobilenet_v1', 'MobileNetV1FeatureExtractor'), ('mobilenet_v2', 'MobileNetV2FeatureExtractor'), ('mobilevit', 'MobileViTFeatureExtractor'), ('nat', 'ViTFeatureExtractor'), ('owlvit', 'OwlViTFeatureExtractor'), ('perceiver', 'PerceiverFeatureExtractor'), ('poolformer', 'PoolFormerFeatureExtractor'), ('regnet', 'ConvNextFeatureExtractor'), ('resnet', 'ConvNextFeatureExtractor'), ('segformer', 'SegformerFeatureExtractor'), ('sew', 'Wav2Vec2FeatureExtractor'), ('sew-d', 'Wav2Vec2FeatureExtractor'), ('speech_to_text', 'Speech2TextFeatureExtractor'), ('speecht5', 'SpeechT5FeatureExtractor'), ('swiftformer', 'ViTFeatureExtractor'), ('swin', 'ViTFeatureExtractor'), ('swinv2', 'ViTFeatureExtractor'), ('table-transformer', 'DetrFeatureExtractor'), ('timesformer', 'VideoMAEFeatureExtractor'), ('tvlt', 'TvltFeatureExtractor'), ('unispeech', 'Wav2Vec2FeatureExtractor'), ('unispeech-sat', 'Wav2Vec2FeatureExtractor'), ('van', 'ConvNextFeatureExtractor'), ('videomae', 'VideoMAEFeatureExtractor'), ('vilt', 'ViltFeatureExtractor'), ('vit', 'ViTFeatureExtractor'), ('vit_mae', 'ViTFeatureExtractor'), ('vit_msn', 'ViTFeatureExtractor'), ('wav2vec2', 'Wav2Vec2FeatureExtractor'), ('wav2vec2-conformer', 'Wav2Vec2FeatureExtractor'), ('wavlm', 'Wav2Vec2FeatureExtractor'), ('whisper', 'WhisperFeatureExtractor'), ('xclip', 'CLIPFeatureExtractor'), ('yolos', 'YolosFeatureExtractor'), ] ) __lowerCAmelCase : List[str] = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES) def a__ ( A_ ): '''simple docstring''' for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items(): if class_name in extractors: __magic_name__ = model_type_to_module_name(A_ ) __magic_name__ = importlib.import_module(f'''.{module_name}''', """transformers.models""" ) try: return getattr(A_, A_ ) except AttributeError: continue for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items(): if getattr(A_, """__name__""", A_ ) == class_name: return extractor # We did not fine the class, but maybe it's because a dep is missing. In that case, the class will be in the main # init and we return the proper dummy to get an appropriate error message. __magic_name__ = importlib.import_module("""transformers""" ) if hasattr(A_, A_ ): return getattr(A_, A_ ) return None def a__ ( A_, A_ = None, A_ = False, A_ = False, A_ = None, A_ = None, A_ = None, A_ = False, **A_, ): '''simple docstring''' __magic_name__ = get_file_from_repo( A_, A_, cache_dir=A_, force_download=A_, resume_download=A_, proxies=A_, use_auth_token=A_, revision=A_, local_files_only=A_, ) if resolved_config_file is None: logger.info( """Could not locate the feature extractor configuration file, will try to use the model config instead.""" ) return {} with open(A_, encoding="""utf-8""" ) as reader: return json.load(A_ ) class UpperCAmelCase_ : '''simple docstring''' def __init__( self : List[Any] ) -> Tuple: """simple docstring""" raise EnvironmentError( """AutoFeatureExtractor is designed to be instantiated """ """using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method.""" ) @classmethod @replace_list_option_in_docstrings(_SCREAMING_SNAKE_CASE ) def _lowercase ( cls : int , UpperCamelCase__ : Union[str, Any] , **UpperCamelCase__ : Union[str, Any] ) -> Optional[Any]: """simple docstring""" __magic_name__ = kwargs.pop("""config""" , _SCREAMING_SNAKE_CASE ) __magic_name__ = kwargs.pop("""trust_remote_code""" , _SCREAMING_SNAKE_CASE ) __magic_name__ = True __magic_name__ , __magic_name__ = FeatureExtractionMixin.get_feature_extractor_dict(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) __magic_name__ = config_dict.get("""feature_extractor_type""" , _SCREAMING_SNAKE_CASE ) __magic_name__ = None if "AutoFeatureExtractor" in config_dict.get("""auto_map""" , {} ): __magic_name__ = config_dict["""auto_map"""]["""AutoFeatureExtractor"""] # If we don't find the feature extractor class in the feature extractor config, let's try the model config. if feature_extractor_class is None and feature_extractor_auto_map is None: if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): __magic_name__ = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) # It could be in `config.feature_extractor_type`` __magic_name__ = getattr(_SCREAMING_SNAKE_CASE , """feature_extractor_type""" , _SCREAMING_SNAKE_CASE ) if hasattr(_SCREAMING_SNAKE_CASE , """auto_map""" ) and "AutoFeatureExtractor" in config.auto_map: __magic_name__ = config.auto_map["""AutoFeatureExtractor"""] if feature_extractor_class is not None: __magic_name__ = feature_extractor_class_from_name(_SCREAMING_SNAKE_CASE ) __magic_name__ = feature_extractor_auto_map is not None __magic_name__ = feature_extractor_class is not None or type(_SCREAMING_SNAKE_CASE ) in FEATURE_EXTRACTOR_MAPPING __magic_name__ = resolve_trust_remote_code( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if has_remote_code and trust_remote_code: __magic_name__ = get_class_from_dynamic_module( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) __magic_name__ = kwargs.pop("""code_revision""" , _SCREAMING_SNAKE_CASE ) if os.path.isdir(_SCREAMING_SNAKE_CASE ): feature_extractor_class.register_for_auto_class() return feature_extractor_class.from_dict(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) elif feature_extractor_class is not None: return feature_extractor_class.from_dict(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) # Last try: we use the FEATURE_EXTRACTOR_MAPPING. elif type(_SCREAMING_SNAKE_CASE ) in FEATURE_EXTRACTOR_MAPPING: __magic_name__ = FEATURE_EXTRACTOR_MAPPING[type(_SCREAMING_SNAKE_CASE )] return feature_extractor_class.from_dict(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) raise ValueError( F'''Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a ''' F'''`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following ''' F'''`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys() )}''' ) @staticmethod def _lowercase ( UpperCamelCase__ : Dict , UpperCamelCase__ : Tuple ) -> Any: """simple docstring""" FEATURE_EXTRACTOR_MAPPING.register(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
721
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, convert_to_rgb, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging __lowerCAmelCase : Dict = logging.get_logger(__name__) if is_vision_available(): import PIL class UpperCAmelCase_ ( _A ): '''simple docstring''' a__ = ["""pixel_values"""] def __init__( self : Optional[Any] , UpperCamelCase__ : bool = True , UpperCamelCase__ : Dict[str, int] = None , UpperCamelCase__ : PILImageResampling = PILImageResampling.BICUBIC , UpperCamelCase__ : bool = True , UpperCamelCase__ : Dict[str, int] = None , UpperCamelCase__ : bool = True , UpperCamelCase__ : Union[int, float] = 1 / 255 , UpperCamelCase__ : bool = True , UpperCamelCase__ : Optional[Union[float, List[float]]] = None , UpperCamelCase__ : Optional[Union[float, List[float]]] = None , UpperCamelCase__ : bool = True , **UpperCamelCase__ : int , ) -> None: """simple docstring""" super().__init__(**UpperCamelCase__ ) __magic_name__ = size if size is not None else {"""shortest_edge""": 224} __magic_name__ = get_size_dict(UpperCamelCase__ , default_to_square=UpperCamelCase__ ) __magic_name__ = crop_size if crop_size is not None else {"""height""": 224, """width""": 224} __magic_name__ = get_size_dict(UpperCamelCase__ , default_to_square=UpperCamelCase__ , param_name="""crop_size""" ) __magic_name__ = do_resize __magic_name__ = size __magic_name__ = resample __magic_name__ = do_center_crop __magic_name__ = crop_size __magic_name__ = do_rescale __magic_name__ = rescale_factor __magic_name__ = do_normalize __magic_name__ = image_mean if image_mean is not None else OPENAI_CLIP_MEAN __magic_name__ = image_std if image_std is not None else OPENAI_CLIP_STD __magic_name__ = do_convert_rgb def _lowercase ( self : Union[str, Any] , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : Dict[str, int] , UpperCamelCase__ : PILImageResampling = PILImageResampling.BICUBIC , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : Tuple , ) -> np.ndarray: """simple docstring""" __magic_name__ = get_size_dict(UpperCamelCase__ , default_to_square=UpperCamelCase__ ) if "shortest_edge" not in size: raise ValueError(F'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''' ) __magic_name__ = get_resize_output_image_size(UpperCamelCase__ , size=size["""shortest_edge"""] , default_to_square=UpperCamelCase__ ) return resize(UpperCamelCase__ , size=UpperCamelCase__ , resample=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__ ) def _lowercase ( self : Tuple , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : Dict[str, int] , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : Tuple , ) -> np.ndarray: """simple docstring""" __magic_name__ = get_size_dict(UpperCamelCase__ ) if "height" not in size or "width" not in size: raise ValueError(F'''The `size` parameter must contain the keys (height, width). Got {size.keys()}''' ) return center_crop(UpperCamelCase__ , size=(size["""height"""], size["""width"""]) , data_format=UpperCamelCase__ , **UpperCamelCase__ ) def _lowercase ( self : Any , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : Union[int, float] , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : Optional[Any] , ) -> Optional[int]: """simple docstring""" return rescale(UpperCamelCase__ , scale=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__ ) def _lowercase ( self : Union[str, Any] , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : Union[float, List[float]] , UpperCamelCase__ : Union[float, List[float]] , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : Dict , ) -> np.ndarray: """simple docstring""" return normalize(UpperCamelCase__ , mean=UpperCamelCase__ , std=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__ ) def _lowercase ( self : List[Any] , UpperCamelCase__ : ImageInput , UpperCamelCase__ : bool = None , UpperCamelCase__ : Dict[str, int] = None , UpperCamelCase__ : PILImageResampling = None , UpperCamelCase__ : bool = None , UpperCamelCase__ : int = None , UpperCamelCase__ : bool = None , UpperCamelCase__ : float = None , UpperCamelCase__ : bool = None , UpperCamelCase__ : Optional[Union[float, List[float]]] = None , UpperCamelCase__ : Optional[Union[float, List[float]]] = None , UpperCamelCase__ : bool = None , UpperCamelCase__ : Optional[Union[str, TensorType]] = None , UpperCamelCase__ : Optional[ChannelDimension] = ChannelDimension.FIRST , **UpperCamelCase__ : Dict , ) -> PIL.Image.Image: """simple docstring""" __magic_name__ = do_resize if do_resize is not None else self.do_resize __magic_name__ = size if size is not None else self.size __magic_name__ = get_size_dict(UpperCamelCase__ , param_name="""size""" , default_to_square=UpperCamelCase__ ) __magic_name__ = resample if resample is not None else self.resample __magic_name__ = do_center_crop if do_center_crop is not None else self.do_center_crop __magic_name__ = crop_size if crop_size is not None else self.crop_size __magic_name__ = get_size_dict(UpperCamelCase__ , param_name="""crop_size""" , default_to_square=UpperCamelCase__ ) __magic_name__ = do_rescale if do_rescale is not None else self.do_rescale __magic_name__ = rescale_factor if rescale_factor is not None else self.rescale_factor __magic_name__ = do_normalize if do_normalize is not None else self.do_normalize __magic_name__ = image_mean if image_mean is not None else self.image_mean __magic_name__ = image_std if image_std is not None else self.image_std __magic_name__ = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb __magic_name__ = make_list_of_images(UpperCamelCase__ ) if not valid_images(UpperCamelCase__ ): raise ValueError( """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """ """torch.Tensor, tf.Tensor or jax.ndarray.""" ) if do_resize and size is None: raise ValueError("""Size must be specified if do_resize is True.""" ) if do_center_crop and crop_size is None: raise ValueError("""Crop size must be specified if do_center_crop is True.""" ) if do_rescale and rescale_factor is None: raise ValueError("""Rescale factor must be specified if do_rescale is True.""" ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("""Image mean and std must be specified if do_normalize is True.""" ) # PIL RGBA images are converted to RGB if do_convert_rgb: __magic_name__ = [convert_to_rgb(UpperCamelCase__ ) for image in images] # All transformations expect numpy arrays. __magic_name__ = [to_numpy_array(UpperCamelCase__ ) for image in images] if do_resize: __magic_name__ = [self.resize(image=UpperCamelCase__ , size=UpperCamelCase__ , resample=UpperCamelCase__ ) for image in images] if do_center_crop: __magic_name__ = [self.center_crop(image=UpperCamelCase__ , size=UpperCamelCase__ ) for image in images] if do_rescale: __magic_name__ = [self.rescale(image=UpperCamelCase__ , scale=UpperCamelCase__ ) for image in images] if do_normalize: __magic_name__ = [self.normalize(image=UpperCamelCase__ , mean=UpperCamelCase__ , std=UpperCamelCase__ ) for image in images] __magic_name__ = [to_channel_dimension_format(UpperCamelCase__ , UpperCamelCase__ ) for image in images] __magic_name__ = {"""pixel_values""": images} return BatchFeature(data=UpperCamelCase__ , tensor_type=UpperCamelCase__ )
76
0
import argparse import os import torch from transformers import FlavaConfig, FlavaForPreTraining from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint def a__ ( A_ ): '''simple docstring''' return sum(param.float().sum() if """encoder.embeddings""" not in key else 0 for key, param in state_dict.items() ) def a__ ( A_, A_ ): '''simple docstring''' __magic_name__ = {} for key, value in state_dict.items(): if "text_encoder.embeddings" in key or "image_encoder.embeddings" in key: continue __magic_name__ = key.replace("""heads.cmd.mim_head.cls.predictions""", """mmm_image_head""" ) __magic_name__ = key.replace("""heads.cmd.mlm_head.cls.predictions""", """mmm_text_head""" ) __magic_name__ = key.replace("""heads.cmd.itm_head.cls""", """itm_head""" ) __magic_name__ = key.replace("""heads.cmd.itm_head.pooler""", """itm_head.pooler""" ) __magic_name__ = key.replace("""heads.cmd.clip_head.logit_scale""", """flava.logit_scale""" ) __magic_name__ = key.replace("""heads.fairseq_mlm.cls.predictions""", """mlm_head""" ) __magic_name__ = key.replace("""heads.imagenet.mim_head.cls.predictions""", """mim_head""" ) __magic_name__ = key.replace("""mm_text_projection""", """flava.text_to_mm_projection""" ) __magic_name__ = key.replace("""mm_image_projection""", """flava.image_to_mm_projection""" ) __magic_name__ = key.replace("""image_encoder.module""", """flava.image_model""" ) __magic_name__ = key.replace("""text_encoder.module""", """flava.text_model""" ) __magic_name__ = key.replace("""mm_encoder.module.encoder.cls_token""", """flava.multimodal_model.cls_token""" ) __magic_name__ = key.replace("""mm_encoder.module""", """flava.multimodal_model""" ) __magic_name__ = key.replace("""text_projection""", """flava.text_projection""" ) __magic_name__ = key.replace("""image_projection""", """flava.image_projection""" ) __magic_name__ = value.float() for key, value in codebook_state_dict.items(): __magic_name__ = value return upgrade @torch.no_grad() def a__ ( A_, A_, A_, A_=None ): '''simple docstring''' if config_path is not None: __magic_name__ = FlavaConfig.from_pretrained(A_ ) else: __magic_name__ = FlavaConfig() __magic_name__ = FlavaForPreTraining(A_ ).eval() __magic_name__ = convert_dalle_checkpoint(A_, A_, save_checkpoint=A_ ) if os.path.exists(A_ ): __magic_name__ = torch.load(A_, map_location="""cpu""" ) else: __magic_name__ = torch.hub.load_state_dict_from_url(A_, map_location="""cpu""" ) __magic_name__ = upgrade_state_dict(A_, A_ ) hf_model.load_state_dict(A_ ) __magic_name__ = hf_model.state_dict() __magic_name__ = count_parameters(A_ ) __magic_name__ = count_parameters(A_ ) + count_parameters(A_ ) assert torch.allclose(A_, A_, atol=1e-3 ) hf_model.save_pretrained(A_ ) if __name__ == "__main__": __lowerCAmelCase : Optional[int] = argparse.ArgumentParser() parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to flava checkpoint') parser.add_argument('--codebook_path', default=None, type=str, help='Path to flava codebook checkpoint') parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert') __lowerCAmelCase : List[Any] = parser.parse_args() convert_flava_checkpoint(args.checkpoint_path, args.codebook_path, args.pytorch_dump_folder_path, args.config_path)
700
import unittest from transformers import AutoTokenizer, NystromformerConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( NystromformerForMaskedLM, NystromformerForMultipleChoice, NystromformerForQuestionAnswering, NystromformerForSequenceClassification, NystromformerForTokenClassification, NystromformerModel, ) from transformers.models.nystromformer.modeling_nystromformer import NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST class UpperCAmelCase_ : '''simple docstring''' def __init__( self : Any , UpperCamelCase__ : Any , UpperCamelCase__ : Tuple=13 , UpperCamelCase__ : Dict=7 , UpperCamelCase__ : Dict=True , UpperCamelCase__ : List[str]=True , UpperCamelCase__ : Optional[int]=True , UpperCamelCase__ : int=True , UpperCamelCase__ : Optional[int]=99 , UpperCamelCase__ : List[Any]=32 , UpperCamelCase__ : Any=5 , UpperCamelCase__ : List[Any]=4 , UpperCamelCase__ : str=37 , UpperCamelCase__ : Any="gelu" , UpperCamelCase__ : int=0.1 , UpperCamelCase__ : Tuple=0.1 , UpperCamelCase__ : Dict=512 , UpperCamelCase__ : Union[str, Any]=16 , UpperCamelCase__ : Dict=2 , UpperCamelCase__ : Any=0.02 , UpperCamelCase__ : Any=3 , UpperCamelCase__ : Dict=4 , UpperCamelCase__ : List[Any]=None , ) -> Union[str, Any]: """simple docstring""" __magic_name__ = parent __magic_name__ = batch_size __magic_name__ = seq_length __magic_name__ = is_training __magic_name__ = use_input_mask __magic_name__ = use_token_type_ids __magic_name__ = use_labels __magic_name__ = vocab_size __magic_name__ = hidden_size __magic_name__ = num_hidden_layers __magic_name__ = num_attention_heads __magic_name__ = intermediate_size __magic_name__ = hidden_act __magic_name__ = hidden_dropout_prob __magic_name__ = attention_probs_dropout_prob __magic_name__ = max_position_embeddings __magic_name__ = type_vocab_size __magic_name__ = type_sequence_label_size __magic_name__ = initializer_range __magic_name__ = num_labels __magic_name__ = num_choices __magic_name__ = scope def _lowercase ( self : Any ) -> Any: """simple docstring""" __magic_name__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __magic_name__ = None if self.use_input_mask: __magic_name__ = random_attention_mask([self.batch_size, self.seq_length] ) __magic_name__ = None if self.use_token_type_ids: __magic_name__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __magic_name__ = None __magic_name__ = None __magic_name__ = None if self.use_labels: __magic_name__ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __magic_name__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __magic_name__ = ids_tensor([self.batch_size] , self.num_choices ) __magic_name__ = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def _lowercase ( self : Tuple ) -> Any: """simple docstring""" return NystromformerConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCamelCase__ , initializer_range=self.initializer_range , ) def _lowercase ( self : Any , UpperCamelCase__ : Tuple , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : str , UpperCamelCase__ : int , UpperCamelCase__ : str , UpperCamelCase__ : Dict , UpperCamelCase__ : str ) -> Tuple: """simple docstring""" __magic_name__ = NystromformerModel(config=UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() __magic_name__ = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ ) __magic_name__ = model(UpperCamelCase__ , token_type_ids=UpperCamelCase__ ) __magic_name__ = model(UpperCamelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _lowercase ( self : Optional[int] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : str , UpperCamelCase__ : List[str] , UpperCamelCase__ : List[str] ) -> str: """simple docstring""" __magic_name__ = NystromformerForMaskedLM(config=UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() __magic_name__ = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , labels=UpperCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def _lowercase ( self : Tuple , UpperCamelCase__ : Any , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Dict , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Any , UpperCamelCase__ : Any ) -> Optional[Any]: """simple docstring""" __magic_name__ = NystromformerForQuestionAnswering(config=UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() __magic_name__ = model( UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , start_positions=UpperCamelCase__ , end_positions=UpperCamelCase__ , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def _lowercase ( self : Optional[Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : List[str] , UpperCamelCase__ : List[str] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Dict , UpperCamelCase__ : Dict , UpperCamelCase__ : Any ) -> Optional[int]: """simple docstring""" __magic_name__ = self.num_labels __magic_name__ = NystromformerForSequenceClassification(UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() __magic_name__ = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , labels=UpperCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def _lowercase ( self : Any , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : str , UpperCamelCase__ : Any ) -> Dict: """simple docstring""" __magic_name__ = self.num_labels __magic_name__ = NystromformerForTokenClassification(config=UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() __magic_name__ = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , labels=UpperCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def _lowercase ( self : Union[str, Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Tuple , UpperCamelCase__ : int , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : int , UpperCamelCase__ : int ) -> Optional[Any]: """simple docstring""" __magic_name__ = self.num_choices __magic_name__ = NystromformerForMultipleChoice(config=UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() __magic_name__ = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __magic_name__ = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __magic_name__ = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __magic_name__ = model( UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , labels=UpperCamelCase__ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def _lowercase ( self : int ) -> List[Any]: """simple docstring""" __magic_name__ = self.prepare_config_and_inputs() ( ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ) = config_and_inputs __magic_name__ = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask} return config, inputs_dict @require_torch class UpperCAmelCase_ ( _A , _A , unittest.TestCase ): '''simple docstring''' a__ = ( ( NystromformerModel, NystromformerForMaskedLM, NystromformerForMultipleChoice, NystromformerForQuestionAnswering, NystromformerForSequenceClassification, NystromformerForTokenClassification, ) if is_torch_available() else () ) a__ = ( { """feature-extraction""": NystromformerModel, """fill-mask""": NystromformerForMaskedLM, """question-answering""": NystromformerForQuestionAnswering, """text-classification""": NystromformerForSequenceClassification, """token-classification""": NystromformerForTokenClassification, """zero-shot""": NystromformerForSequenceClassification, } if is_torch_available() else {} ) a__ = False a__ = False def _lowercase ( self : Union[str, Any] ) -> Union[str, Any]: """simple docstring""" __magic_name__ = NystromformerModelTester(self ) __magic_name__ = ConfigTester(self , config_class=UpperCamelCase__ , hidden_size=37 ) def _lowercase ( self : Tuple ) -> Any: """simple docstring""" self.config_tester.run_common_tests() def _lowercase ( self : Optional[Any] ) -> Any: """simple docstring""" __magic_name__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCamelCase__ ) def _lowercase ( self : Optional[Any] ) -> int: """simple docstring""" __magic_name__ = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: __magic_name__ = type self.model_tester.create_and_check_model(*UpperCamelCase__ ) def _lowercase ( self : List[Any] ) -> List[Any]: """simple docstring""" __magic_name__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*UpperCamelCase__ ) def _lowercase ( self : Union[str, Any] ) -> str: """simple docstring""" __magic_name__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*UpperCamelCase__ ) def _lowercase ( self : Dict ) -> List[Any]: """simple docstring""" __magic_name__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*UpperCamelCase__ ) def _lowercase ( self : str ) -> int: """simple docstring""" __magic_name__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*UpperCamelCase__ ) def _lowercase ( self : List[Any] ) -> List[str]: """simple docstring""" __magic_name__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*UpperCamelCase__ ) @slow def _lowercase ( self : str ) -> Tuple: """simple docstring""" for model_name in NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __magic_name__ = NystromformerModel.from_pretrained(UpperCamelCase__ ) self.assertIsNotNone(UpperCamelCase__ ) @require_torch class UpperCAmelCase_ ( unittest.TestCase ): '''simple docstring''' @slow def _lowercase ( self : Union[str, Any] ) -> Optional[Any]: """simple docstring""" __magic_name__ = NystromformerModel.from_pretrained("""uw-madison/nystromformer-512""" ) __magic_name__ = torch.tensor([[0, 1, 2, 3, 4, 5]] ) with torch.no_grad(): __magic_name__ = model(UpperCamelCase__ )[0] __magic_name__ = torch.Size((1, 6, 768) ) self.assertEqual(output.shape , UpperCamelCase__ ) __magic_name__ = torch.tensor( [[[-0.4532, -0.0936, 0.5137], [-0.2676, 0.0628, 0.6186], [-0.3629, -0.1726, 0.4716]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , UpperCamelCase__ , atol=1E-4 ) ) @slow def _lowercase ( self : int ) -> str: """simple docstring""" __magic_name__ = """the [MASK] of Belgium is Brussels""" __magic_name__ = AutoTokenizer.from_pretrained("""uw-madison/nystromformer-512""" ) __magic_name__ = NystromformerForMaskedLM.from_pretrained("""uw-madison/nystromformer-512""" ) __magic_name__ = tokenizer(UpperCamelCase__ , return_tensors="""pt""" ) with torch.no_grad(): __magic_name__ = model(encoding.input_ids ).logits __magic_name__ = token_logits[:, 2, :].argmax(-1 )[0] self.assertEqual(tokenizer.decode(UpperCamelCase__ ) , """capital""" )
76
0
from pathlib import Path import numpy as np from PIL import Image def a__ ( A_ ): '''simple docstring''' __magic_name__ , __magic_name__ , __magic_name__ = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2] return 0.2989 * r + 0.5870 * g + 0.1140 * b def a__ ( A_ ): '''simple docstring''' return (gray > 127) & (gray <= 255) def a__ ( A_, A_ ): '''simple docstring''' __magic_name__ = np.zeros_like(A_ ) __magic_name__ = np.zeros( (image.shape[0] + kernel.shape[0] - 1, image.shape[1] + kernel.shape[1] - 1) ) # Copy image to padded image __magic_name__ = image # Iterate over image & apply kernel for x in range(image.shape[1] ): for y in range(image.shape[0] ): __magic_name__ = ( kernel * image_padded[y : y + kernel.shape[0], x : x + kernel.shape[1]] ).sum() __magic_name__ = int(summation > 0 ) return output if __name__ == "__main__": # read original image __lowerCAmelCase : List[str] = Path(__file__).resolve().parent / 'image_data' / 'lena.jpg' __lowerCAmelCase : Tuple = np.array(Image.open(lena_path)) # kernel to be applied __lowerCAmelCase : List[Any] = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]]) __lowerCAmelCase : str = dilation(gray_to_binary(rgb_to_gray(lena)), structuring_element) # Save the output image __lowerCAmelCase : Tuple = Image.fromarray(output).convert('RGB') pil_img.save('result_dilation.png')
701
from ...configuration_utils import PretrainedConfig from ...utils import logging __lowerCAmelCase : Tuple = logging.get_logger(__name__) __lowerCAmelCase : Union[str, Any] = { 'microsoft/cvt-13': 'https://huggingface.co/microsoft/cvt-13/resolve/main/config.json', # See all Cvt models at https://huggingface.co/models?filter=cvt } class UpperCAmelCase_ ( _A ): '''simple docstring''' a__ = """cvt""" def __init__( self : Dict , UpperCamelCase__ : Optional[int]=3 , UpperCamelCase__ : List[Any]=[7, 3, 3] , UpperCamelCase__ : Any=[4, 2, 2] , UpperCamelCase__ : Optional[Any]=[2, 1, 1] , UpperCamelCase__ : Union[str, Any]=[64, 192, 384] , UpperCamelCase__ : Dict=[1, 3, 6] , UpperCamelCase__ : Any=[1, 2, 10] , UpperCamelCase__ : List[str]=[4.0, 4.0, 4.0] , UpperCamelCase__ : Dict=[0.0, 0.0, 0.0] , UpperCamelCase__ : Tuple=[0.0, 0.0, 0.0] , UpperCamelCase__ : Optional[Any]=[0.0, 0.0, 0.1] , UpperCamelCase__ : str=[True, True, True] , UpperCamelCase__ : Optional[Any]=[False, False, True] , UpperCamelCase__ : Union[str, Any]=["dw_bn", "dw_bn", "dw_bn"] , UpperCamelCase__ : List[Any]=[3, 3, 3] , UpperCamelCase__ : Any=[1, 1, 1] , UpperCamelCase__ : Optional[int]=[2, 2, 2] , UpperCamelCase__ : Any=[1, 1, 1] , UpperCamelCase__ : List[str]=[1, 1, 1] , UpperCamelCase__ : int=0.02 , UpperCamelCase__ : int=1E-12 , **UpperCamelCase__ : int , ) -> Dict: """simple docstring""" super().__init__(**UpperCamelCase__ ) __magic_name__ = num_channels __magic_name__ = patch_sizes __magic_name__ = patch_stride __magic_name__ = patch_padding __magic_name__ = embed_dim __magic_name__ = num_heads __magic_name__ = depth __magic_name__ = mlp_ratio __magic_name__ = attention_drop_rate __magic_name__ = drop_rate __magic_name__ = drop_path_rate __magic_name__ = qkv_bias __magic_name__ = cls_token __magic_name__ = qkv_projection_method __magic_name__ = kernel_qkv __magic_name__ = padding_kv __magic_name__ = stride_kv __magic_name__ = padding_q __magic_name__ = stride_q __magic_name__ = initializer_range __magic_name__ = layer_norm_eps
76
0
import argparse import re import numpy as np import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( SamConfig, SamImageProcessor, SamModel, SamProcessor, SamVisionConfig, ) __lowerCAmelCase : str = { 'iou_prediction_head.layers.0': 'iou_prediction_head.proj_in', 'iou_prediction_head.layers.1': 'iou_prediction_head.layers.0', 'iou_prediction_head.layers.2': 'iou_prediction_head.proj_out', 'mask_decoder.output_upscaling.0': 'mask_decoder.upscale_conv1', 'mask_decoder.output_upscaling.1': 'mask_decoder.upscale_layer_norm', 'mask_decoder.output_upscaling.3': 'mask_decoder.upscale_conv2', 'mask_downscaling.0': 'mask_embed.conv1', 'mask_downscaling.1': 'mask_embed.layer_norm1', 'mask_downscaling.3': 'mask_embed.conv2', 'mask_downscaling.4': 'mask_embed.layer_norm2', 'mask_downscaling.6': 'mask_embed.conv3', 'point_embeddings': 'point_embed', 'pe_layer.positional_encoding_gaussian_matrix': 'shared_embedding.positional_embedding', 'image_encoder': 'vision_encoder', 'neck.0': 'neck.conv1', 'neck.1': 'neck.layer_norm1', 'neck.2': 'neck.conv2', 'neck.3': 'neck.layer_norm2', 'patch_embed.proj': 'patch_embed.projection', '.norm': '.layer_norm', 'blocks': 'layers', } def a__ ( A_ ): '''simple docstring''' __magic_name__ = {} state_dict.pop("""pixel_mean""", A_ ) state_dict.pop("""pixel_std""", A_ ) __magic_name__ = R""".*.output_hypernetworks_mlps.(\d+).layers.(\d+).*""" for key, value in state_dict.items(): for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items(): if key_to_modify in key: __magic_name__ = key.replace(A_, A_ ) if re.match(A_, A_ ): __magic_name__ = int(re.match(A_, A_ ).group(2 ) ) if layer_nb == 0: __magic_name__ = key.replace("""layers.0""", """proj_in""" ) elif layer_nb == 1: __magic_name__ = key.replace("""layers.1""", """layers.0""" ) elif layer_nb == 2: __magic_name__ = key.replace("""layers.2""", """proj_out""" ) __magic_name__ = value __magic_name__ = model_state_dict[ """prompt_encoder.shared_embedding.positional_embedding""" ] return model_state_dict def a__ ( A_, A_, A_, A_="ybelkada/segment-anything" ): '''simple docstring''' __magic_name__ = hf_hub_download(A_, f'''checkpoints/{model_name}.pth''' ) if "sam_vit_b" in model_name: __magic_name__ = SamConfig() elif "sam_vit_l" in model_name: __magic_name__ = SamVisionConfig( hidden_size=1024, num_hidden_layers=24, num_attention_heads=16, global_attn_indexes=[5, 11, 17, 23], ) __magic_name__ = SamConfig( vision_config=A_, ) elif "sam_vit_h" in model_name: __magic_name__ = SamVisionConfig( hidden_size=1280, num_hidden_layers=32, num_attention_heads=16, global_attn_indexes=[7, 15, 23, 31], ) __magic_name__ = SamConfig( vision_config=A_, ) __magic_name__ = torch.load(A_, map_location="""cpu""" ) __magic_name__ = replace_keys(A_ ) __magic_name__ = SamImageProcessor() __magic_name__ = SamProcessor(image_processor=A_ ) __magic_name__ = SamModel(A_ ) hf_model.load_state_dict(A_ ) __magic_name__ = hf_model.to("""cuda""" ) __magic_name__ = """https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png""" __magic_name__ = Image.open(requests.get(A_, stream=A_ ).raw ).convert("""RGB""" ) __magic_name__ = [[[400, 650]]] __magic_name__ = [[1]] __magic_name__ = processor(images=np.array(A_ ), return_tensors="""pt""" ).to("""cuda""" ) with torch.no_grad(): __magic_name__ = hf_model(**A_ ) __magic_name__ = output.iou_scores.squeeze() if model_name == "sam_vit_h_4b8939": assert scores[-1].item() == 0.579890251159668 __magic_name__ = processor( images=np.array(A_ ), input_points=A_, input_labels=A_, return_tensors="""pt""" ).to("""cuda""" ) with torch.no_grad(): __magic_name__ = hf_model(**A_ ) __magic_name__ = output.iou_scores.squeeze() assert scores[-1].item() == 0.9712603092193604 __magic_name__ = ((75, 275, 1725, 850),) __magic_name__ = processor(images=np.array(A_ ), input_boxes=A_, return_tensors="""pt""" ).to("""cuda""" ) with torch.no_grad(): __magic_name__ = hf_model(**A_ ) __magic_name__ = output.iou_scores.squeeze() assert scores[-1].item() == 0.8686015605926514 # Test with 2 points and 1 image. __magic_name__ = [[[400, 650], [800, 650]]] __magic_name__ = [[1, 1]] __magic_name__ = processor( images=np.array(A_ ), input_points=A_, input_labels=A_, return_tensors="""pt""" ).to("""cuda""" ) with torch.no_grad(): __magic_name__ = hf_model(**A_ ) __magic_name__ = output.iou_scores.squeeze() assert scores[-1].item() == 0.9936047792434692 if __name__ == "__main__": __lowerCAmelCase : int = argparse.ArgumentParser() __lowerCAmelCase : Optional[Any] = ['sam_vit_b_01ec64', 'sam_vit_h_4b8939', 'sam_vit_l_0b3195'] parser.add_argument( '--model_name', default='sam_vit_h_4b8939', choices=choices, type=str, help='Path to hf config.json of model to convert', ) parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument( '--push_to_hub', action='store_true', help='Whether to push the model and processor to the hub after converting', ) parser.add_argument( '--model_hub_id', default='ybelkada/segment-anything', choices=choices, type=str, help='Path to hf config.json of model to convert', ) __lowerCAmelCase : Dict = parser.parse_args() convert_sam_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.model_hub_id)
702
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available __lowerCAmelCase : List[str] = { 'configuration_canine': ['CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'CanineConfig'], 'tokenization_canine': ['CanineTokenizer'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCAmelCase : Optional[Any] = [ 'CANINE_PRETRAINED_MODEL_ARCHIVE_LIST', 'CanineForMultipleChoice', 'CanineForQuestionAnswering', 'CanineForSequenceClassification', 'CanineForTokenClassification', 'CanineLayer', 'CanineModel', 'CaninePreTrainedModel', 'load_tf_weights_in_canine', ] if TYPE_CHECKING: from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig from .tokenization_canine import CanineTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_canine import ( CANINE_PRETRAINED_MODEL_ARCHIVE_LIST, CanineForMultipleChoice, CanineForQuestionAnswering, CanineForSequenceClassification, CanineForTokenClassification, CanineLayer, CanineModel, CaninePreTrainedModel, load_tf_weights_in_canine, ) else: import sys __lowerCAmelCase : int = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
76
0
import logging import sys from dataclasses import dataclass, field from typing import Any, Dict, List, Optional, Union import librosa import torch from datasets import DatasetDict, load_dataset from packaging import version from torch import nn from transformers import ( HfArgumentParser, Trainer, TrainingArguments, WavaVecaConfig, WavaVecaFeatureExtractor, WavaVecaForPreTraining, is_apex_available, trainer_utils, ) from transformers.models.wavaveca.modeling_wavaveca import _compute_mask_indices if is_apex_available(): from apex import amp if version.parse(version.parse(torch.__version__).base_version) >= version.parse('1.6'): __lowerCAmelCase : int = True from torch.cuda.amp import autocast __lowerCAmelCase : Optional[Any] = logging.getLogger(__name__) @dataclass class UpperCAmelCase_ : '''simple docstring''' a__ = field( metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} ) a__ = field( default=_A , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , ) a__ = field( default=_A , metadata={"""help""": """Whether to freeze the feature extractor layers of the model."""} ) a__ = field( default=_A , metadata={"""help""": """Whether to log verbose messages or not."""} , ) a__ = field( default=2.0 , metadata={"""help""": """Maximum temperature for gumbel softmax."""} ) a__ = field( default=0.5 , metadata={"""help""": """Minimum temperature for gumbel softmax."""} ) a__ = field( default=0.99_99_95 , metadata={"""help""": """Decay of gumbel temperature during training."""} ) def a__ ( A_, A_ ): '''simple docstring''' logging.basicConfig( format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", handlers=[logging.StreamHandler(sys.stdout )], ) __magic_name__ = logging.WARNING if model_args.verbose_logging: __magic_name__ = logging.DEBUG elif trainer_utils.is_main_process(training_args.local_rank ): __magic_name__ = logging.INFO logger.setLevel(A_ ) @dataclass class UpperCAmelCase_ : '''simple docstring''' a__ = field( default=_A , metadata={"""help""": """The name of the dataset to use (via the datasets library)."""} ) a__ = field( default=_A , metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""} ) a__ = field( default="""train""" , metadata={ """help""": """The name of the training data set split to use (via the datasets library). Defaults to 'train'""" } , ) a__ = field( default="""validation""" , metadata={ """help""": ( """The name of the validation data set split to use (via the datasets library). Defaults to 'validation'""" ) } , ) a__ = field( default="""file""" , metadata={"""help""": """Column in the dataset that contains speech file path. Defaults to 'file'"""} , ) a__ = field( default=_A , metadata={"""help""": """Overwrite the cached preprocessed datasets or not."""} ) a__ = field( default=1 , metadata={ """help""": """The percentage of the train set used as validation set in case there's no validation split""" } , ) a__ = field( default=_A , metadata={"""help""": """The number of processes to use for the preprocessing."""} , ) a__ = field( default=20.0 , metadata={"""help""": """Filter audio files that are longer than `max_duration_in_seconds` seconds"""} ) @dataclass class UpperCAmelCase_ : '''simple docstring''' a__ = 42 a__ = 42 a__ = """longest""" a__ = None a__ = None def __call__( self : int , UpperCamelCase__ : List[Dict[str, Union[List[int], torch.Tensor]]] ) -> Dict[str, torch.Tensor]: """simple docstring""" __magic_name__ = self.feature_extractor.pad( UpperCamelCase__ , max_length=self.max_length , padding=self.padding , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="""pt""" , ) __magic_name__ = self.model._get_feat_extract_output_lengths(batch["""input_values"""].shape[-1] ) __magic_name__ = batch["""input_values"""].shape[0] # make sure that no loss is computed on padded inputs if batch["attention_mask"] is not None: # compute real output lengths according to convolution formula __magic_name__ = self.model._get_feat_extract_output_lengths(batch["""attention_mask"""].sum(-1 ) ).to( torch.long ) __magic_name__ = torch.zeros( (batch_size, mask_indices_seq_length) , dtype=torch.long , device=batch["""input_values"""].device ) # these two operations makes sure that all values # before the output lengths indices are attended to __magic_name__ = 1 __magic_name__ = attention_mask.flip([-1] ).cumsum(-1 ).flip([-1] ).bool() # sample randomly masked indices __magic_name__ = _compute_mask_indices( (batch_size, mask_indices_seq_length) , self.model.config.mask_time_prob , self.model.config.mask_time_length , attention_mask=UpperCamelCase__ , min_masks=2 , ) return batch class UpperCAmelCase_ ( _A ): '''simple docstring''' def __init__( self : int , *UpperCamelCase__ : List[Any] , UpperCamelCase__ : Optional[int]=1 , UpperCamelCase__ : Dict=0 , UpperCamelCase__ : Union[str, Any]=1.0 , **UpperCamelCase__ : Optional[int] ) -> List[Any]: """simple docstring""" super().__init__(*UpperCamelCase__ , **UpperCamelCase__ ) __magic_name__ = 0 __magic_name__ = max_gumbel_temp __magic_name__ = min_gumbel_temp __magic_name__ = gumbel_temp_decay def _lowercase ( self : Any , UpperCamelCase__ : nn.Module , UpperCamelCase__ : Dict[str, Union[torch.Tensor, Any]] ) -> torch.Tensor: """simple docstring""" model.train() __magic_name__ = self._prepare_inputs(UpperCamelCase__ ) if self.use_amp: with autocast(): __magic_name__ = self.compute_loss(UpperCamelCase__ , UpperCamelCase__ ) else: __magic_name__ = self.compute_loss(UpperCamelCase__ , UpperCamelCase__ ) if self.args.n_gpu > 1 or self.deepspeed: if model.module.config.ctc_loss_reduction == "mean": __magic_name__ = loss.mean() elif model.module.config.ctc_loss_reduction == "sum": __magic_name__ = loss.sum() / (inputs["""mask_time_indices"""]).sum() else: raise ValueError(F'''{model.config.ctc_loss_reduction} is not valid. Choose one of [\'mean\', \'sum\']''' ) if self.args.gradient_accumulation_steps > 1: __magic_name__ = loss / self.args.gradient_accumulation_steps if self.use_amp: self.scaler.scale(UpperCamelCase__ ).backward() elif self.use_apex: with amp.scale_loss(UpperCamelCase__ , self.optimizer ) as scaled_loss: scaled_loss.backward() elif self.deepspeed: self.deepspeed.backward(UpperCamelCase__ ) else: loss.backward() self.num_update_step += 1 # make sure gumbel softmax temperature is decayed if self.args.n_gpu > 1 or self.deepspeed: model.module.set_gumbel_temperature( max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step , self.min_gumbel_temp ) ) else: model.set_gumbel_temperature( max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step , self.min_gumbel_temp ) ) return loss.detach() def a__ ( ): '''simple docstring''' __magic_name__ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) __magic_name__ , __magic_name__ , __magic_name__ = parser.parse_args_into_dataclasses() configure_logger(A_, A_ ) # Downloading and loading a dataset from the hub. __magic_name__ = load_dataset(data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir ) if "validation" not in datasets.keys(): # make sure only "validation" and "train" keys remain" __magic_name__ = DatasetDict() __magic_name__ = load_dataset( data_args.dataset_name, data_args.dataset_config_name, split=f'''{data_args.train_split_name}[:{data_args.validation_split_percentage}%]''', cache_dir=model_args.cache_dir, ) __magic_name__ = load_dataset( data_args.dataset_name, data_args.dataset_config_name, split=f'''{data_args.train_split_name}[{data_args.validation_split_percentage}%:]''', cache_dir=model_args.cache_dir, ) else: # make sure only "validation" and "train" keys remain" __magic_name__ = DatasetDict() __magic_name__ = load_dataset( data_args.dataset_name, data_args.dataset_config_name, split="""validation""", cache_dir=model_args.cache_dir, ) __magic_name__ = load_dataset( data_args.dataset_name, data_args.dataset_config_name, split=f'''{data_args.train_split_name}''', cache_dir=model_args.cache_dir, ) # only normalized-inputs-training is supported __magic_name__ = WavaVecaFeatureExtractor.from_pretrained( model_args.model_name_or_path, cache_dir=model_args.cache_dir, do_normalize=A_ ) def prepare_dataset(A_ ): # check that all files have the correct sampling rate __magic_name__ , __magic_name__ = librosa.load(batch[data_args.speech_file_column], sr=feature_extractor.sampling_rate ) return batch # load audio files into numpy arrays __magic_name__ = datasets.map( A_, num_proc=data_args.preprocessing_num_workers, remove_columns=datasets["""train"""].column_names ) # filter audio files that are too long __magic_name__ = vectorized_datasets.filter( lambda A_ : len(data["""speech"""] ) < int(data_args.max_duration_in_seconds * feature_extractor.sampling_rate ) ) def normalize(A_ ): return feature_extractor(batch["""speech"""], sampling_rate=feature_extractor.sampling_rate ) # normalize and transform to `BatchFeatures` __magic_name__ = vectorized_datasets.map( A_, batched=A_, num_proc=data_args.preprocessing_num_workers, load_from_cache_file=not data_args.overwrite_cache, remove_columns=vectorized_datasets["""train"""].column_names, ) # pretraining is only supported for "newer" stable layer norm architecture # apply_spec_augment has to be True, mask_feature_prob has to be 0.0 __magic_name__ = WavaVecaConfig.from_pretrained( model_args.model_name_or_path, cache_dir=model_args.cache_dir, gradient_checkpointing=training_args.gradient_checkpointing, ) if not config.do_stable_layer_norm or config.feat_extract_norm != "layer": raise ValueError( """PreTraining is only supported for ``config.do_stable_layer_norm=True`` and""" """ ``config.feat_extract_norm='layer'""" ) __magic_name__ = WavaVecaForPreTraining(A_ ) __magic_name__ = DataCollatorForWavaVecaPretraining(model=A_, feature_extractor=A_ ) __magic_name__ = WavaVecaPreTrainer( model=A_, data_collator=A_, args=A_, train_dataset=vectorized_datasets["""train"""], eval_dataset=vectorized_datasets["""validation"""], tokenizer=A_, max_gumbel_temp=model_args.max_gumbel_temperature, min_gumbel_temp=model_args.min_gumbel_temperature, gumbel_temp_decay=model_args.gumbel_temperature_decay, ) trainer.train() if __name__ == "__main__": main()
703
import argparse import torch from transformers import ( WavaVecaConfig, WavaVecaFeatureExtractor, WavaVecaForAudioFrameClassification, WavaVecaForSequenceClassification, WavaVecaForXVector, logging, ) logging.set_verbosity_info() __lowerCAmelCase : str = logging.get_logger(__name__) def a__ ( A_, A_, A_ ): '''simple docstring''' __magic_name__ = WavaVecaForSequenceClassification.from_pretrained(A_, config=A_ ) __magic_name__ = downstream_dict["""projector.weight"""] __magic_name__ = downstream_dict["""projector.bias"""] __magic_name__ = downstream_dict["""model.post_net.linear.weight"""] __magic_name__ = downstream_dict["""model.post_net.linear.bias"""] return model def a__ ( A_, A_, A_ ): '''simple docstring''' __magic_name__ = WavaVecaForAudioFrameClassification.from_pretrained(A_, config=A_ ) __magic_name__ = downstream_dict["""model.linear.weight"""] __magic_name__ = downstream_dict["""model.linear.bias"""] return model def a__ ( A_, A_, A_ ): '''simple docstring''' __magic_name__ = WavaVecaForXVector.from_pretrained(A_, config=A_ ) __magic_name__ = downstream_dict["""connector.weight"""] __magic_name__ = downstream_dict["""connector.bias"""] for i, kernel_size in enumerate(hf_config.tdnn_kernel ): __magic_name__ = downstream_dict[ f'''model.framelevel_feature_extractor.module.{i}.kernel.weight''' ] __magic_name__ = downstream_dict[f'''model.framelevel_feature_extractor.module.{i}.kernel.bias'''] __magic_name__ = downstream_dict["""model.utterancelevel_feature_extractor.linear1.weight"""] __magic_name__ = downstream_dict["""model.utterancelevel_feature_extractor.linear1.bias"""] __magic_name__ = downstream_dict["""model.utterancelevel_feature_extractor.linear2.weight"""] __magic_name__ = downstream_dict["""model.utterancelevel_feature_extractor.linear2.bias"""] __magic_name__ = downstream_dict["""objective.W"""] return model @torch.no_grad() def a__ ( A_, A_, A_, A_ ): '''simple docstring''' __magic_name__ = torch.load(A_, map_location="""cpu""" ) __magic_name__ = checkpoint["""Downstream"""] __magic_name__ = WavaVecaConfig.from_pretrained(A_ ) __magic_name__ = WavaVecaFeatureExtractor.from_pretrained( A_, return_attention_mask=A_, do_normalize=A_ ) __magic_name__ = hf_config.architectures[0] if arch.endswith("""ForSequenceClassification""" ): __magic_name__ = convert_classification(A_, A_, A_ ) elif arch.endswith("""ForAudioFrameClassification""" ): __magic_name__ = convert_diarization(A_, A_, A_ ) elif arch.endswith("""ForXVector""" ): __magic_name__ = convert_xvector(A_, A_, A_ ) else: raise NotImplementedError(f'''S3PRL weights conversion is not supported for {arch}''' ) if hf_config.use_weighted_layer_sum: __magic_name__ = checkpoint["""Featurizer"""]["""weights"""] hf_feature_extractor.save_pretrained(A_ ) hf_model.save_pretrained(A_ ) if __name__ == "__main__": __lowerCAmelCase : Optional[Any] = argparse.ArgumentParser() parser.add_argument( '--base_model_name', default=None, type=str, help='Name of the huggingface pretrained base model.' ) parser.add_argument('--config_path', default=None, type=str, help='Path to the huggingface classifier config.') parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to the s3prl checkpoint.') parser.add_argument('--model_dump_path', default=None, type=str, help='Path to the final converted model.') __lowerCAmelCase : str = parser.parse_args() convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
76
0
from manim import * class UpperCAmelCase_ ( _A ): '''simple docstring''' def _lowercase ( self : Optional[int] ) -> Union[str, Any]: """simple docstring""" __magic_name__ = Rectangle(height=0.5 , width=0.5 ) __magic_name__ = Rectangle(height=0.25 , width=0.25 ) __magic_name__ = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 ) __magic_name__ = [mem.copy() for i in range(6 )] __magic_name__ = [mem.copy() for i in range(6 )] __magic_name__ = VGroup(*UpperCamelCase__ ).arrange(UpperCamelCase__ , buff=0 ) __magic_name__ = VGroup(*UpperCamelCase__ ).arrange(UpperCamelCase__ , buff=0 ) __magic_name__ = VGroup(UpperCamelCase__ , UpperCamelCase__ ).arrange(UpperCamelCase__ , buff=0 ) __magic_name__ = Text("""CPU""" , font_size=24 ) __magic_name__ = Group(UpperCamelCase__ , UpperCamelCase__ ).arrange(UpperCamelCase__ , buff=0.5 , aligned_edge=UpperCamelCase__ ) cpu.move_to([-2.5, -0.5, 0] ) self.add(UpperCamelCase__ ) __magic_name__ = [mem.copy() for i in range(4 )] __magic_name__ = VGroup(*UpperCamelCase__ ).arrange(UpperCamelCase__ , buff=0 ) __magic_name__ = Text("""GPU""" , font_size=24 ) __magic_name__ = Group(UpperCamelCase__ , UpperCamelCase__ ).arrange(UpperCamelCase__ , buff=0.5 , aligned_edge=UpperCamelCase__ ) gpu.move_to([-1, -1, 0] ) self.add(UpperCamelCase__ ) __magic_name__ = [mem.copy() for i in range(6 )] __magic_name__ = VGroup(*UpperCamelCase__ ).arrange(UpperCamelCase__ , buff=0 ) __magic_name__ = Text("""Model""" , font_size=24 ) __magic_name__ = Group(UpperCamelCase__ , UpperCamelCase__ ).arrange(UpperCamelCase__ , buff=0.5 , aligned_edge=UpperCamelCase__ ) model.move_to([3, -1.0, 0] ) self.add(UpperCamelCase__ ) __magic_name__ = [] __magic_name__ = [] __magic_name__ = [] for i, rect in enumerate(UpperCamelCase__ ): rect.set_stroke(UpperCamelCase__ ) __magic_name__ = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(UpperCamelCase__ , opacity=0.7 ) if i == 0: cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=UpperCamelCase__ ) cpu_target.set_x(cpu_target.get_x() + 0.1 ) elif i == 3: cpu_target.next_to(model_cpu_arr[0] , direction=UpperCamelCase__ , buff=0.0 ) else: cpu_target.next_to(model_cpu_arr[i - 1] , direction=UpperCamelCase__ , buff=0.0 ) self.add(UpperCamelCase__ ) model_cpu_arr.append(UpperCamelCase__ ) self.add(*UpperCamelCase__ , *UpperCamelCase__ , *UpperCamelCase__ ) __magic_name__ = [mem.copy() for i in range(6 )] __magic_name__ = VGroup(*UpperCamelCase__ ).arrange(UpperCamelCase__ , buff=0 ) __magic_name__ = Text("""Loaded Checkpoint""" , font_size=24 ) __magic_name__ = Group(UpperCamelCase__ , UpperCamelCase__ ).arrange(UpperCamelCase__ , buff=0.5 , aligned_edge=UpperCamelCase__ ) checkpoint.move_to([3, 0.5, 0] ) self.add(UpperCamelCase__ ) __magic_name__ = [] __magic_name__ = [] for i, rect in enumerate(UpperCamelCase__ ): __magic_name__ = fill.copy().set_fill(UpperCamelCase__ , opacity=0.7 ) target.move_to(UpperCamelCase__ ) ckpt_arr.append(UpperCamelCase__ ) __magic_name__ = target.copy() if i < 5: cpu_target.move_to(cpu_left_col_base[i + 1] ) else: cpu_target.move_to(cpu_right_col_base[i - 5] ) ckpt_cpu_arr.append(UpperCamelCase__ ) self.add(*UpperCamelCase__ , *UpperCamelCase__ ) __magic_name__ = Square(side_length=2.2 ) key.move_to([-5, 2, 0] ) __magic_name__ = MarkupText( F'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' , font_size=18 , ) key_text.move_to([-5, 2.4, 0] ) self.add(UpperCamelCase__ , UpperCamelCase__ ) __magic_name__ = MarkupText( F'''<span fgcolor=\'{BLUE}\'>●</span> Checkpoint''' , font_size=18 , ) blue_text.next_to(UpperCamelCase__ , DOWN * 2.4 , aligned_edge=key_text.get_left() ) self.add(UpperCamelCase__ ) __magic_name__ = MarkupText( F'''Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device.''' , font_size=24 , ) step_a.move_to([2, 2, 0] ) __magic_name__ = [meta_mem.copy() for i in range(6 )] __magic_name__ = [meta_mem.copy() for i in range(6 )] __magic_name__ = VGroup(*UpperCamelCase__ ).arrange(UpperCamelCase__ , buff=0 ) __magic_name__ = VGroup(*UpperCamelCase__ ).arrange(UpperCamelCase__ , buff=0 ) __magic_name__ = VGroup(UpperCamelCase__ , UpperCamelCase__ ).arrange(UpperCamelCase__ , buff=0 ) __magic_name__ = Text("""Disk""" , font_size=24 ) __magic_name__ = Group(UpperCamelCase__ , UpperCamelCase__ ).arrange(UpperCamelCase__ , buff=0.5 , aligned_edge=UpperCamelCase__ ) disk.move_to([-4.0, -1.25, 0] ) self.play(Write(UpperCamelCase__ , run_time=3 ) , Write(UpperCamelCase__ , run_time=1 ) , Create(UpperCamelCase__ , run_time=1 ) ) __magic_name__ = [] for i, rect in enumerate(UpperCamelCase__ ): __magic_name__ = rect.copy() target.generate_target() target.target.move_to(disk_left_col_base[i] ).scale(0.5 ) animations.append(MoveToTarget(UpperCamelCase__ , run_time=1.5 ) ) self.play(*UpperCamelCase__ ) self.play(FadeOut(UpperCamelCase__ ) ) __magic_name__ = MarkupText(F'''Then, the checkpoint is removed from memory\nthrough garbage collection.''' , font_size=24 ) step_a.move_to([2, 2, 0] ) self.play(Write(UpperCamelCase__ , run_time=3 ) ) self.play( FadeOut(UpperCamelCase__ , UpperCamelCase__ , *UpperCamelCase__ , *UpperCamelCase__ ) , ) self.wait()
704
import pytest from datasets import Dataset, DatasetDict, Features, NamedSplit, Value from datasets.io.text import TextDatasetReader from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases def a__ ( A_, A_ ): '''simple docstring''' assert isinstance(A_, A_ ) assert dataset.num_rows == 4 assert dataset.num_columns == 1 assert dataset.column_names == ["text"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("""keep_in_memory""", [False, True] ) def a__ ( A_, A_, A_ ): '''simple docstring''' __magic_name__ = tmp_path / """cache""" __magic_name__ = {"""text""": """string"""} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): __magic_name__ = TextDatasetReader(A_, cache_dir=A_, keep_in_memory=A_ ).read() _check_text_dataset(A_, A_ ) @pytest.mark.parametrize( """features""", [ None, {"""text""": """string"""}, {"""text""": """int32"""}, {"""text""": """float32"""}, ], ) def a__ ( A_, A_, A_ ): '''simple docstring''' __magic_name__ = tmp_path / """cache""" __magic_name__ = {"""text""": """string"""} __magic_name__ = features.copy() if features else default_expected_features __magic_name__ = ( Features({feature: Value(A_ ) for feature, dtype in features.items()} ) if features is not None else None ) __magic_name__ = TextDatasetReader(A_, features=A_, cache_dir=A_ ).read() _check_text_dataset(A_, A_ ) @pytest.mark.parametrize("""split""", [None, NamedSplit("""train""" ), """train""", """test"""] ) def a__ ( A_, A_, A_ ): '''simple docstring''' __magic_name__ = tmp_path / """cache""" __magic_name__ = {"""text""": """string"""} __magic_name__ = TextDatasetReader(A_, cache_dir=A_, split=A_ ).read() _check_text_dataset(A_, A_ ) assert dataset.split == split if split else "train" @pytest.mark.parametrize("""path_type""", [str, list] ) def a__ ( A_, A_, A_ ): '''simple docstring''' if issubclass(A_, A_ ): __magic_name__ = text_path elif issubclass(A_, A_ ): __magic_name__ = [text_path] __magic_name__ = tmp_path / """cache""" __magic_name__ = {"""text""": """string"""} __magic_name__ = TextDatasetReader(A_, cache_dir=A_ ).read() _check_text_dataset(A_, A_ ) def a__ ( A_, A_, A_=("train",) ): '''simple docstring''' assert isinstance(A_, A_ ) for split in splits: __magic_name__ = dataset_dict[split] assert dataset.num_rows == 4 assert dataset.num_columns == 1 assert dataset.column_names == ["text"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("""keep_in_memory""", [False, True] ) def a__ ( A_, A_, A_ ): '''simple docstring''' __magic_name__ = tmp_path / """cache""" __magic_name__ = {"""text""": """string"""} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): __magic_name__ = TextDatasetReader({"""train""": text_path}, cache_dir=A_, keep_in_memory=A_ ).read() _check_text_datasetdict(A_, A_ ) @pytest.mark.parametrize( """features""", [ None, {"""text""": """string"""}, {"""text""": """int32"""}, {"""text""": """float32"""}, ], ) def a__ ( A_, A_, A_ ): '''simple docstring''' __magic_name__ = tmp_path / """cache""" # CSV file loses col_1 string dtype information: default now is "int64" instead of "string" __magic_name__ = {"""text""": """string"""} __magic_name__ = features.copy() if features else default_expected_features __magic_name__ = ( Features({feature: Value(A_ ) for feature, dtype in features.items()} ) if features is not None else None ) __magic_name__ = TextDatasetReader({"""train""": text_path}, features=A_, cache_dir=A_ ).read() _check_text_datasetdict(A_, A_ ) @pytest.mark.parametrize("""split""", [None, NamedSplit("""train""" ), """train""", """test"""] ) def a__ ( A_, A_, A_ ): '''simple docstring''' if split: __magic_name__ = {split: text_path} else: __magic_name__ = """train""" __magic_name__ = {"""train""": text_path, """test""": text_path} __magic_name__ = tmp_path / """cache""" __magic_name__ = {"""text""": """string"""} __magic_name__ = TextDatasetReader(A_, cache_dir=A_ ).read() _check_text_datasetdict(A_, A_, splits=list(path.keys() ) ) assert all(dataset[split].split == split for split in path.keys() )
76
0
from __future__ import annotations import math def a__ ( A_ ): '''simple docstring''' if num <= 0: __magic_name__ = f'''{num}: Invalid input, please enter a positive integer.''' raise ValueError(A_ ) __magic_name__ = [True] * (num + 1) __magic_name__ = [] __magic_name__ = 2 __magic_name__ = int(math.sqrt(A_ ) ) while start <= end: # If start is a prime if sieve[start] is True: prime.append(A_ ) # Set multiples of start be False for i in range(start * start, num + 1, A_ ): if sieve[i] is True: __magic_name__ = False start += 1 for j in range(end + 1, num + 1 ): if sieve[j] is True: prime.append(A_ ) return prime if __name__ == "__main__": print(prime_sieve(int(input('Enter a positive integer: ').strip())))
705
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, logging __lowerCAmelCase : Union[str, Any] = logging.get_logger(__name__) class UpperCAmelCase_ ( _A ): '''simple docstring''' a__ = ["""pixel_values"""] def __init__( self : str , UpperCamelCase__ : bool = True , UpperCamelCase__ : Optional[Dict[str, int]] = None , UpperCamelCase__ : PILImageResampling = PILImageResampling.BILINEAR , UpperCamelCase__ : bool = True , UpperCamelCase__ : Dict[str, int] = None , UpperCamelCase__ : bool = True , UpperCamelCase__ : Union[int, float] = 1 / 255 , UpperCamelCase__ : bool = True , UpperCamelCase__ : Optional[Union[float, List[float]]] = None , UpperCamelCase__ : Optional[Union[float, List[float]]] = None , **UpperCamelCase__ : List[Any] , ) -> None: """simple docstring""" super().__init__(**UpperCamelCase__ ) __magic_name__ = size if size is not None else {"""shortest_edge""": 256} __magic_name__ = get_size_dict(UpperCamelCase__ , default_to_square=UpperCamelCase__ ) __magic_name__ = crop_size if crop_size is not None else {"""height""": 224, """width""": 224} __magic_name__ = get_size_dict(UpperCamelCase__ ) __magic_name__ = do_resize __magic_name__ = size __magic_name__ = resample __magic_name__ = do_center_crop __magic_name__ = crop_size __magic_name__ = do_rescale __magic_name__ = rescale_factor __magic_name__ = do_normalize __magic_name__ = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN __magic_name__ = image_std if image_std is not None else IMAGENET_STANDARD_STD def _lowercase ( self : Any , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : Dict[str, int] , UpperCamelCase__ : PILImageResampling = PILImageResampling.BICUBIC , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : Tuple , ) -> np.ndarray: """simple docstring""" __magic_name__ = get_size_dict(UpperCamelCase__ , default_to_square=UpperCamelCase__ ) if "shortest_edge" not in size: raise ValueError(F'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''' ) __magic_name__ = get_resize_output_image_size(UpperCamelCase__ , size=size["""shortest_edge"""] , default_to_square=UpperCamelCase__ ) return resize(UpperCamelCase__ , size=UpperCamelCase__ , resample=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__ ) def _lowercase ( self : str , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : Dict[str, int] , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : str , ) -> np.ndarray: """simple docstring""" __magic_name__ = get_size_dict(UpperCamelCase__ ) return center_crop(UpperCamelCase__ , size=(size["""height"""], size["""width"""]) , data_format=UpperCamelCase__ , **UpperCamelCase__ ) def _lowercase ( self : Tuple , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : float , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : Any ) -> np.ndarray: """simple docstring""" return rescale(UpperCamelCase__ , scale=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__ ) def _lowercase ( self : List[str] , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : Union[float, List[float]] , UpperCamelCase__ : Union[float, List[float]] , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : str , ) -> np.ndarray: """simple docstring""" return normalize(UpperCamelCase__ , mean=UpperCamelCase__ , std=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__ ) def _lowercase ( self : Optional[Any] , UpperCamelCase__ : ImageInput , UpperCamelCase__ : Optional[bool] = None , UpperCamelCase__ : Dict[str, int] = None , UpperCamelCase__ : PILImageResampling = None , UpperCamelCase__ : bool = None , UpperCamelCase__ : Dict[str, int] = None , UpperCamelCase__ : Optional[bool] = None , UpperCamelCase__ : Optional[float] = None , UpperCamelCase__ : Optional[bool] = None , UpperCamelCase__ : Optional[Union[float, List[float]]] = None , UpperCamelCase__ : Optional[Union[float, List[float]]] = None , UpperCamelCase__ : Optional[Union[str, TensorType]] = None , UpperCamelCase__ : Union[str, ChannelDimension] = ChannelDimension.FIRST , **UpperCamelCase__ : int , ) -> Dict: """simple docstring""" __magic_name__ = do_resize if do_resize is not None else self.do_resize __magic_name__ = size if size is not None else self.size __magic_name__ = get_size_dict(UpperCamelCase__ , default_to_square=UpperCamelCase__ ) __magic_name__ = resample if resample is not None else self.resample __magic_name__ = do_center_crop if do_center_crop is not None else self.do_center_crop __magic_name__ = crop_size if crop_size is not None else self.crop_size __magic_name__ = get_size_dict(UpperCamelCase__ ) __magic_name__ = do_rescale if do_rescale is not None else self.do_rescale __magic_name__ = rescale_factor if rescale_factor is not None else self.rescale_factor __magic_name__ = do_normalize if do_normalize is not None else self.do_normalize __magic_name__ = image_mean if image_mean is not None else self.image_mean __magic_name__ = image_std if image_std is not None else self.image_std __magic_name__ = make_list_of_images(UpperCamelCase__ ) if not valid_images(UpperCamelCase__ ): raise ValueError( """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """ """torch.Tensor, tf.Tensor or jax.ndarray.""" ) if do_resize and size is None: raise ValueError("""Size must be specified if do_resize is True.""" ) if do_center_crop and crop_size is None: raise ValueError("""Crop size must be specified if do_center_crop is True.""" ) if do_rescale and rescale_factor is None: raise ValueError("""Rescale factor must be specified if do_rescale is True.""" ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("""Image mean and std must be specified if do_normalize is True.""" ) # All transformations expect numpy arrays. __magic_name__ = [to_numpy_array(UpperCamelCase__ ) for image in images] if do_resize: __magic_name__ = [self.resize(image=UpperCamelCase__ , size=UpperCamelCase__ , resample=UpperCamelCase__ ) for image in images] if do_center_crop: __magic_name__ = [self.center_crop(image=UpperCamelCase__ , size=UpperCamelCase__ ) for image in images] if do_rescale: __magic_name__ = [self.rescale(image=UpperCamelCase__ , scale=UpperCamelCase__ ) for image in images] if do_normalize: __magic_name__ = [self.normalize(image=UpperCamelCase__ , mean=UpperCamelCase__ , std=UpperCamelCase__ ) for image in images] __magic_name__ = [to_channel_dimension_format(UpperCamelCase__ , UpperCamelCase__ ) for image in images] __magic_name__ = {"""pixel_values""": images} return BatchFeature(data=UpperCamelCase__ , tensor_type=UpperCamelCase__ )
76
0
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import os from ...utils.constants import SAGEMAKER_PARALLEL_EC2_INSTANCES, TORCH_DYNAMO_MODES from ...utils.dataclasses import ComputeEnvironment, SageMakerDistributedType from ...utils.imports import is_botoa_available from .config_args import SageMakerConfig from .config_utils import ( DYNAMO_BACKENDS, _ask_field, _ask_options, _convert_dynamo_backend, _convert_mixed_precision, _convert_sagemaker_distributed_mode, _convert_yes_no_to_bool, ) if is_botoa_available(): import botoa # noqa: F401 def a__ ( A_ ): '''simple docstring''' __magic_name__ = botoa.client("""iam""" ) __magic_name__ = { """Version""": """2012-10-17""", """Statement""": [ {"""Effect""": """Allow""", """Principal""": {"""Service""": """sagemaker.amazonaws.com"""}, """Action""": """sts:AssumeRole"""} ], } try: # create the role, associated with the chosen trust policy iam_client.create_role( RoleName=A_, AssumeRolePolicyDocument=json.dumps(A_, indent=2 ) ) __magic_name__ = { """Version""": """2012-10-17""", """Statement""": [ { """Effect""": """Allow""", """Action""": [ """sagemaker:*""", """ecr:GetDownloadUrlForLayer""", """ecr:BatchGetImage""", """ecr:BatchCheckLayerAvailability""", """ecr:GetAuthorizationToken""", """cloudwatch:PutMetricData""", """cloudwatch:GetMetricData""", """cloudwatch:GetMetricStatistics""", """cloudwatch:ListMetrics""", """logs:CreateLogGroup""", """logs:CreateLogStream""", """logs:DescribeLogStreams""", """logs:PutLogEvents""", """logs:GetLogEvents""", """s3:CreateBucket""", """s3:ListBucket""", """s3:GetBucketLocation""", """s3:GetObject""", """s3:PutObject""", ], """Resource""": """*""", } ], } # attach policy to role iam_client.put_role_policy( RoleName=A_, PolicyName=f'''{role_name}_policy_permission''', PolicyDocument=json.dumps(A_, indent=2 ), ) except iam_client.exceptions.EntityAlreadyExistsException: print(f'''role {role_name} already exists. Using existing one''' ) def a__ ( A_ ): '''simple docstring''' __magic_name__ = botoa.client("""iam""" ) return iam_client.get_role(RoleName=A_ )["Role"]["Arn"] def a__ ( ): '''simple docstring''' __magic_name__ = _ask_options( """How do you want to authorize?""", ["""AWS Profile""", """Credentials (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY) """], A_, ) __magic_name__ = None if credentials_configuration == 0: __magic_name__ = _ask_field("""Enter your AWS Profile name: [default] """, default="""default""" ) __magic_name__ = aws_profile else: print( """Note you will need to provide AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY when you launch you training script with,""" """`accelerate launch --aws_access_key_id XXX --aws_secret_access_key YYY`""" ) __magic_name__ = _ask_field("""AWS Access Key ID: """ ) __magic_name__ = aws_access_key_id __magic_name__ = _ask_field("""AWS Secret Access Key: """ ) __magic_name__ = aws_secret_access_key __magic_name__ = _ask_field("""Enter your AWS Region: [us-east-1]""", default="""us-east-1""" ) __magic_name__ = aws_region __magic_name__ = _ask_options( """Do you already have an IAM Role for executing Amazon SageMaker Training Jobs?""", ["""Provide IAM Role name""", """Create new IAM role using credentials"""], A_, ) if role_management == 0: __magic_name__ = _ask_field("""Enter your IAM role name: """ ) else: __magic_name__ = """accelerate_sagemaker_execution_role""" print(f'''Accelerate will create an iam role "{iam_role_name}" using the provided credentials''' ) _create_iam_role_for_sagemaker(A_ ) __magic_name__ = _ask_field( """Do you want to use custom Docker image? [yes/NO]: """, _convert_yes_no_to_bool, default=A_, error_message="""Please enter yes or no.""", ) __magic_name__ = None if is_custom_docker_image: __magic_name__ = _ask_field("""Enter your Docker image: """, lambda A_ : str(A_ ).lower() ) __magic_name__ = _ask_field( """Do you want to provide SageMaker input channels with data locations? [yes/NO]: """, _convert_yes_no_to_bool, default=A_, error_message="""Please enter yes or no.""", ) __magic_name__ = None if is_sagemaker_inputs_enabled: __magic_name__ = _ask_field( """Enter the path to the SageMaker inputs TSV file with columns (channel_name, data_location): """, lambda A_ : str(A_ ).lower(), ) __magic_name__ = _ask_field( """Do you want to enable SageMaker metrics? [yes/NO]: """, _convert_yes_no_to_bool, default=A_, error_message="""Please enter yes or no.""", ) __magic_name__ = None if is_sagemaker_metrics_enabled: __magic_name__ = _ask_field( """Enter the path to the SageMaker metrics TSV file with columns (metric_name, metric_regex): """, lambda A_ : str(A_ ).lower(), ) __magic_name__ = _ask_options( """What is the distributed mode?""", ["""No distributed training""", """Data parallelism"""], _convert_sagemaker_distributed_mode, ) __magic_name__ = {} __magic_name__ = _ask_field( """Do you wish to optimize your script with torch dynamo?[yes/NO]:""", _convert_yes_no_to_bool, default=A_, error_message="""Please enter yes or no.""", ) if use_dynamo: __magic_name__ = """dynamo_""" __magic_name__ = _ask_options( """Which dynamo backend would you like to use?""", [x.lower() for x in DYNAMO_BACKENDS], _convert_dynamo_backend, default=2, ) __magic_name__ = _ask_field( """Do you want to customize the defaults sent to torch.compile? [yes/NO]: """, _convert_yes_no_to_bool, default=A_, error_message="""Please enter yes or no.""", ) if use_custom_options: __magic_name__ = _ask_options( """Which mode do you want to use?""", A_, lambda A_ : TORCH_DYNAMO_MODES[int(A_ )], default="""default""", ) __magic_name__ = _ask_field( """Do you want the fullgraph mode or it is ok to break model into several subgraphs? [yes/NO]: """, _convert_yes_no_to_bool, default=A_, error_message="""Please enter yes or no.""", ) __magic_name__ = _ask_field( """Do you want to enable dynamic shape tracing? [yes/NO]: """, _convert_yes_no_to_bool, default=A_, error_message="""Please enter yes or no.""", ) __magic_name__ = """Which EC2 instance type you want to use for your training?""" if distributed_type != SageMakerDistributedType.NO: __magic_name__ = _ask_options( A_, A_, lambda A_ : SAGEMAKER_PARALLEL_EC2_INSTANCES[int(A_ )] ) else: eca_instance_query += "? [ml.p3.2xlarge]:" __magic_name__ = _ask_field(A_, lambda A_ : str(A_ ).lower(), default="""ml.p3.2xlarge""" ) __magic_name__ = 1 if distributed_type in (SageMakerDistributedType.DATA_PARALLEL, SageMakerDistributedType.MODEL_PARALLEL): __magic_name__ = _ask_field( """How many machines do you want use? [1]: """, A_, default=1, ) __magic_name__ = _ask_options( """Do you wish to use FP16 or BF16 (mixed precision)?""", ["""no""", """fp16""", """bf16""", """fp8"""], _convert_mixed_precision, ) if use_dynamo and mixed_precision == "no": print( """Torch dynamo used without mixed precision requires TF32 to be efficient. Accelerate will enable it by default when launching your scripts.""" ) return SageMakerConfig( image_uri=A_, compute_environment=ComputeEnvironment.AMAZON_SAGEMAKER, distributed_type=A_, use_cpu=A_, dynamo_config=A_, eca_instance_type=A_, profile=A_, region=A_, iam_role_name=A_, mixed_precision=A_, num_machines=A_, sagemaker_inputs_file=A_, sagemaker_metrics_file=A_, )
706
import math def a__ ( A_, A_ = 0, A_ = 0 ): '''simple docstring''' __magic_name__ = end or len(A_ ) for i in range(A_, A_ ): __magic_name__ = i __magic_name__ = array[i] while temp_index != start and temp_index_value < array[temp_index - 1]: __magic_name__ = array[temp_index - 1] temp_index -= 1 __magic_name__ = temp_index_value return array def a__ ( A_, A_, A_ ): # Max Heap '''simple docstring''' __magic_name__ = index __magic_name__ = 2 * index + 1 # Left Node __magic_name__ = 2 * index + 2 # Right Node if left_index < heap_size and array[largest] < array[left_index]: __magic_name__ = left_index if right_index < heap_size and array[largest] < array[right_index]: __magic_name__ = right_index if largest != index: __magic_name__ , __magic_name__ = array[largest], array[index] heapify(A_, A_, A_ ) def a__ ( A_ ): '''simple docstring''' __magic_name__ = len(A_ ) for i in range(n // 2, -1, -1 ): heapify(A_, A_, A_ ) for i in range(n - 1, 0, -1 ): __magic_name__ , __magic_name__ = array[0], array[i] heapify(A_, 0, A_ ) return array def a__ ( A_, A_, A_, A_ ): '''simple docstring''' if (array[first_index] > array[middle_index]) != ( array[first_index] > array[last_index] ): return array[first_index] elif (array[middle_index] > array[first_index]) != ( array[middle_index] > array[last_index] ): return array[middle_index] else: return array[last_index] def a__ ( A_, A_, A_, A_ ): '''simple docstring''' __magic_name__ = low __magic_name__ = high while True: while array[i] < pivot: i += 1 j -= 1 while pivot < array[j]: j -= 1 if i >= j: return i __magic_name__ , __magic_name__ = array[j], array[i] i += 1 def a__ ( A_ ): '''simple docstring''' if len(A_ ) == 0: return array __magic_name__ = 2 * math.ceil(math.loga(len(A_ ) ) ) __magic_name__ = 16 return intro_sort(A_, 0, len(A_ ), A_, A_ ) def a__ ( A_, A_, A_, A_, A_ ): '''simple docstring''' while end - start > size_threshold: if max_depth == 0: return heap_sort(A_ ) max_depth -= 1 __magic_name__ = median_of_a(A_, A_, start + ((end - start) // 2) + 1, end - 1 ) __magic_name__ = partition(A_, A_, A_, A_ ) intro_sort(A_, A_, A_, A_, A_ ) __magic_name__ = p return insertion_sort(A_, A_, A_ ) if __name__ == "__main__": import doctest doctest.testmod() __lowerCAmelCase : str = input('Enter numbers separated by a comma : ').strip() __lowerCAmelCase : List[Any] = [float(item) for item in user_input.split(',')] print(sort(unsorted))
76
0
def a__ ( A_, A_ ): '''simple docstring''' if density <= 0: raise ValueError("""Impossible fluid density""" ) if bulk_modulus <= 0: raise ValueError("""Impossible bulk modulus""" ) return (bulk_modulus / density) ** 0.5 if __name__ == "__main__": import doctest doctest.testmod()
707
import argparse import json import re from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( MobileNetVaConfig, MobileNetVaForImageClassification, MobileNetVaImageProcessor, load_tf_weights_in_mobilenet_va, ) from transformers.utils import logging logging.set_verbosity_info() __lowerCAmelCase : str = logging.get_logger(__name__) def a__ ( A_ ): '''simple docstring''' __magic_name__ = MobileNetVaConfig(layer_norm_eps=0.001 ) if "_quant" in model_name: raise ValueError("""Quantized models are not supported.""" ) __magic_name__ = re.match(R"""^mobilenet_v1_([^_]*)_([^_]*)$""", A_ ) if matches: __magic_name__ = float(matches[1] ) __magic_name__ = int(matches[2] ) # The TensorFlow version of MobileNetV1 predicts 1001 classes instead of # the usual 1000. The first class (index 0) is "background". __magic_name__ = 1001 __magic_name__ = """imagenet-1k-id2label.json""" __magic_name__ = """huggingface/label-files""" __magic_name__ = json.load(open(hf_hub_download(A_, A_, repo_type="""dataset""" ), """r""" ) ) __magic_name__ = {int(A_ ) + 1: v for k, v in idalabel.items()} __magic_name__ = """background""" __magic_name__ = idalabel __magic_name__ = {v: k for k, v in idalabel.items()} return config def a__ ( ): '''simple docstring''' __magic_name__ = """http://images.cocodataset.org/val2017/000000039769.jpg""" __magic_name__ = Image.open(requests.get(A_, stream=A_ ).raw ) return im @torch.no_grad() def a__ ( A_, A_, A_, A_=False ): '''simple docstring''' __magic_name__ = get_mobilenet_va_config(A_ ) # Load 🤗 model __magic_name__ = MobileNetVaForImageClassification(A_ ).eval() # Load weights from TensorFlow checkpoint load_tf_weights_in_mobilenet_va(A_, A_, A_ ) # Check outputs on an image, prepared by MobileNetV1ImageProcessor __magic_name__ = MobileNetVaImageProcessor( crop_size={"""width""": config.image_size, """height""": config.image_size}, size={"""shortest_edge""": config.image_size + 32}, ) __magic_name__ = image_processor(images=prepare_img(), return_tensors="""pt""" ) __magic_name__ = model(**A_ ) __magic_name__ = outputs.logits assert logits.shape == (1, 1001) if model_name == "mobilenet_v1_1.0_224": __magic_name__ = torch.tensor([-4.1739, -1.1233, 3.1205] ) elif model_name == "mobilenet_v1_0.75_192": __magic_name__ = torch.tensor([-3.9440, -2.3141, -0.3333] ) else: __magic_name__ = None if expected_logits is not None: assert torch.allclose(logits[0, :3], A_, atol=1e-4 ) Path(A_ ).mkdir(exist_ok=A_ ) print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(A_ ) print(f'''Saving image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(A_ ) if push_to_hub: print("""Pushing to the hub...""" ) __magic_name__ = """google/""" + model_name image_processor.push_to_hub(A_ ) model.push_to_hub(A_ ) if __name__ == "__main__": __lowerCAmelCase : str = argparse.ArgumentParser() # Required parameters parser.add_argument( '--model_name', default='mobilenet_v1_1.0_224', type=str, help='Name of the MobileNetV1 model you\'d like to convert. Should in the form \'mobilenet_v1_<depth>_<size>\'.', ) parser.add_argument( '--checkpoint_path', required=True, type=str, help='Path to the original TensorFlow checkpoint (.ckpt file).' ) parser.add_argument( '--pytorch_dump_folder_path', required=True, type=str, help='Path to the output PyTorch model directory.' ) parser.add_argument( '--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.' ) __lowerCAmelCase : str = parser.parse_args() convert_movilevit_checkpoint( args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub )
76
0
import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging __lowerCAmelCase : Optional[Any] = logging.get_logger(__name__) __lowerCAmelCase : List[Any] = { 'asapp/sew-d-tiny-100k': 'https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json', # See all SEW-D models at https://huggingface.co/models?filter=sew-d } class UpperCAmelCase_ ( _A ): '''simple docstring''' a__ = """sew-d""" def __init__( self : List[str] , UpperCamelCase__ : Tuple=32 , UpperCamelCase__ : Optional[int]=768 , UpperCamelCase__ : Tuple=12 , UpperCamelCase__ : Optional[Any]=12 , UpperCamelCase__ : int=3072 , UpperCamelCase__ : Tuple=2 , UpperCamelCase__ : List[Any]=512 , UpperCamelCase__ : Any=256 , UpperCamelCase__ : Optional[int]=True , UpperCamelCase__ : Dict=True , UpperCamelCase__ : str=("p2c", "c2p") , UpperCamelCase__ : List[Any]="layer_norm" , UpperCamelCase__ : int="gelu_python" , UpperCamelCase__ : Optional[int]=0.1 , UpperCamelCase__ : Tuple=0.1 , UpperCamelCase__ : List[Any]=0.1 , UpperCamelCase__ : int=0.0 , UpperCamelCase__ : Dict=0.1 , UpperCamelCase__ : List[Any]=0.02 , UpperCamelCase__ : Optional[int]=1E-7 , UpperCamelCase__ : List[Any]=1E-5 , UpperCamelCase__ : List[str]="group" , UpperCamelCase__ : Optional[int]="gelu" , UpperCamelCase__ : Tuple=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512) , UpperCamelCase__ : str=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , UpperCamelCase__ : Optional[Any]=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , UpperCamelCase__ : Optional[Any]=False , UpperCamelCase__ : Optional[int]=128 , UpperCamelCase__ : Tuple=16 , UpperCamelCase__ : Optional[int]=True , UpperCamelCase__ : Dict=0.05 , UpperCamelCase__ : str=10 , UpperCamelCase__ : Tuple=2 , UpperCamelCase__ : Dict=0.0 , UpperCamelCase__ : Dict=10 , UpperCamelCase__ : Union[str, Any]=0 , UpperCamelCase__ : List[Any]="mean" , UpperCamelCase__ : int=False , UpperCamelCase__ : Optional[int]=False , UpperCamelCase__ : Optional[int]=256 , UpperCamelCase__ : List[str]=0 , UpperCamelCase__ : Union[str, Any]=1 , UpperCamelCase__ : List[Any]=2 , **UpperCamelCase__ : str , ) -> Dict: """simple docstring""" super().__init__(**UpperCamelCase__ , pad_token_id=UpperCamelCase__ , bos_token_id=UpperCamelCase__ , eos_token_id=UpperCamelCase__ ) __magic_name__ = hidden_size __magic_name__ = feat_extract_norm __magic_name__ = feat_extract_activation __magic_name__ = list(UpperCamelCase__ ) __magic_name__ = list(UpperCamelCase__ ) __magic_name__ = list(UpperCamelCase__ ) __magic_name__ = conv_bias __magic_name__ = num_conv_pos_embeddings __magic_name__ = num_conv_pos_embedding_groups __magic_name__ = len(self.conv_dim ) __magic_name__ = num_hidden_layers __magic_name__ = intermediate_size __magic_name__ = squeeze_factor __magic_name__ = max_position_embeddings __magic_name__ = position_buckets __magic_name__ = share_att_key __magic_name__ = relative_attention __magic_name__ = norm_rel_ebd __magic_name__ = list(UpperCamelCase__ ) __magic_name__ = hidden_act __magic_name__ = num_attention_heads __magic_name__ = hidden_dropout __magic_name__ = attention_dropout __magic_name__ = activation_dropout __magic_name__ = feat_proj_dropout __magic_name__ = final_dropout __magic_name__ = layer_norm_eps __magic_name__ = feature_layer_norm_eps __magic_name__ = initializer_range __magic_name__ = vocab_size if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( """Configuration for convolutional layers is incorrect.""" """It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,""" F'''but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)''' F'''= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' ) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 __magic_name__ = apply_spec_augment __magic_name__ = mask_time_prob __magic_name__ = mask_time_length __magic_name__ = mask_time_min_masks __magic_name__ = mask_feature_prob __magic_name__ = mask_feature_length __magic_name__ = mask_feature_min_masks # ctc loss __magic_name__ = ctc_loss_reduction __magic_name__ = ctc_zero_infinity # sequence classification __magic_name__ = use_weighted_layer_sum __magic_name__ = classifier_proj_size @property def _lowercase ( self : Union[str, Any] ) -> str: """simple docstring""" return functools.reduce(operator.mul , self.conv_stride , 1 )
708
import collections import importlib.util import os import re from pathlib import Path __lowerCAmelCase : int = 'src/transformers' # Matches is_xxx_available() __lowerCAmelCase : Optional[int] = re.compile(R'is\_([a-z_]*)_available()') # Catches a one-line _import_struct = {xxx} __lowerCAmelCase : Dict = re.compile(R'^_import_structure\s+=\s+\{([^\}]+)\}') # Catches a line with a key-values pattern: "bla": ["foo", "bar"] __lowerCAmelCase : int = re.compile(R'\s+"\S*":\s+\[([^\]]*)\]') # Catches a line if not is_foo_available __lowerCAmelCase : Optional[Any] = re.compile(R'^\s*if\s+not\s+is\_[a-z_]*\_available\(\)') # Catches a line _import_struct["bla"].append("foo") __lowerCAmelCase : Optional[Any] = re.compile(R'^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)') # Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"] __lowerCAmelCase : Dict = re.compile(R'^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]') # Catches a line with an object between quotes and a comma: "MyModel", __lowerCAmelCase : List[str] = re.compile('^\s+"([^"]+)",') # Catches a line with objects between brackets only: ["foo", "bar"], __lowerCAmelCase : Optional[int] = re.compile('^\s+\[([^\]]+)\]') # Catches a line with from foo import bar, bla, boo __lowerCAmelCase : List[str] = re.compile(R'\s+from\s+\S*\s+import\s+([^\(\s].*)\n') # Catches a line with try: __lowerCAmelCase : int = re.compile(R'^\s*try:') # Catches a line with else: __lowerCAmelCase : Tuple = re.compile(R'^\s*else:') def a__ ( A_ ): '''simple docstring''' if _re_test_backend.search(A_ ) is None: return None __magic_name__ = [b[0] for b in _re_backend.findall(A_ )] backends.sort() return "_and_".join(A_ ) def a__ ( A_ ): '''simple docstring''' with open(A_, """r""", encoding="""utf-8""", newline="""\n""" ) as f: __magic_name__ = f.readlines() __magic_name__ = 0 while line_index < len(A_ ) and not lines[line_index].startswith("""_import_structure = {""" ): line_index += 1 # If this is a traditional init, just return. if line_index >= len(A_ ): return None # First grab the objects without a specific backend in _import_structure __magic_name__ = [] while not lines[line_index].startswith("""if TYPE_CHECKING""" ) and find_backend(lines[line_index] ) is None: __magic_name__ = lines[line_index] # If we have everything on a single line, let's deal with it. if _re_one_line_import_struct.search(A_ ): __magic_name__ = _re_one_line_import_struct.search(A_ ).groups()[0] __magic_name__ = re.findall("""\[([^\]]+)\]""", A_ ) for imp in imports: objects.extend([obj[1:-1] for obj in imp.split(""", """ )] ) line_index += 1 continue __magic_name__ = _re_import_struct_key_value.search(A_ ) if single_line_import_search is not None: __magic_name__ = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(""", """ ) if len(A_ ) > 0] objects.extend(A_ ) elif line.startswith(""" """ * 8 + """\"""" ): objects.append(line[9:-3] ) line_index += 1 __magic_name__ = {"""none""": objects} # Let's continue with backend-specific objects in _import_structure while not lines[line_index].startswith("""if TYPE_CHECKING""" ): # If the line is an if not is_backend_available, we grab all objects associated. __magic_name__ = find_backend(lines[line_index] ) # Check if the backend declaration is inside a try block: if _re_try.search(lines[line_index - 1] ) is None: __magic_name__ = None if backend is not None: line_index += 1 # Scroll until we hit the else block of try-except-else while _re_else.search(lines[line_index] ) is None: line_index += 1 line_index += 1 __magic_name__ = [] # Until we unindent, add backend objects to the list while len(lines[line_index] ) <= 1 or lines[line_index].startswith(""" """ * 4 ): __magic_name__ = lines[line_index] if _re_import_struct_add_one.search(A_ ) is not None: objects.append(_re_import_struct_add_one.search(A_ ).groups()[0] ) elif _re_import_struct_add_many.search(A_ ) is not None: __magic_name__ = _re_import_struct_add_many.search(A_ ).groups()[0].split(""", """ ) __magic_name__ = [obj[1:-1] for obj in imports if len(A_ ) > 0] objects.extend(A_ ) elif _re_between_brackets.search(A_ ) is not None: __magic_name__ = _re_between_brackets.search(A_ ).groups()[0].split(""", """ ) __magic_name__ = [obj[1:-1] for obj in imports if len(A_ ) > 0] objects.extend(A_ ) elif _re_quote_object.search(A_ ) is not None: objects.append(_re_quote_object.search(A_ ).groups()[0] ) elif line.startswith(""" """ * 8 + """\"""" ): objects.append(line[9:-3] ) elif line.startswith(""" """ * 12 + """\"""" ): objects.append(line[13:-3] ) line_index += 1 __magic_name__ = objects else: line_index += 1 # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend __magic_name__ = [] while ( line_index < len(A_ ) and find_backend(lines[line_index] ) is None and not lines[line_index].startswith("""else""" ) ): __magic_name__ = lines[line_index] __magic_name__ = _re_import.search(A_ ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(""", """ ) ) elif line.startswith(""" """ * 8 ): objects.append(line[8:-2] ) line_index += 1 __magic_name__ = {"""none""": objects} # Let's continue with backend-specific objects while line_index < len(A_ ): # If the line is an if is_backend_available, we grab all objects associated. __magic_name__ = find_backend(lines[line_index] ) # Check if the backend declaration is inside a try block: if _re_try.search(lines[line_index - 1] ) is None: __magic_name__ = None if backend is not None: line_index += 1 # Scroll until we hit the else block of try-except-else while _re_else.search(lines[line_index] ) is None: line_index += 1 line_index += 1 __magic_name__ = [] # Until we unindent, add backend objects to the list while len(lines[line_index] ) <= 1 or lines[line_index].startswith(""" """ * 8 ): __magic_name__ = lines[line_index] __magic_name__ = _re_import.search(A_ ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(""", """ ) ) elif line.startswith(""" """ * 12 ): objects.append(line[12:-2] ) line_index += 1 __magic_name__ = objects else: line_index += 1 return import_dict_objects, type_hint_objects def a__ ( A_, A_ ): '''simple docstring''' def find_duplicates(A_ ): return [k for k, v in collections.Counter(A_ ).items() if v > 1] if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ): return ["Both sides of the init do not have the same backends!"] __magic_name__ = [] for key in import_dict_objects.keys(): __magic_name__ = find_duplicates(import_dict_objects[key] ) if duplicate_imports: errors.append(f'''Duplicate _import_structure definitions for: {duplicate_imports}''' ) __magic_name__ = find_duplicates(type_hint_objects[key] ) if duplicate_type_hints: errors.append(f'''Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}''' ) if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ): __magic_name__ = """base imports""" if key == """none""" else f'''{key} backend''' errors.append(f'''Differences for {name}:''' ) for a in type_hint_objects[key]: if a not in import_dict_objects[key]: errors.append(f''' {a} in TYPE_HINT but not in _import_structure.''' ) for a in import_dict_objects[key]: if a not in type_hint_objects[key]: errors.append(f''' {a} in _import_structure but not in TYPE_HINT.''' ) return errors def a__ ( ): '''simple docstring''' __magic_name__ = [] for root, _, files in os.walk(A_ ): if "__init__.py" in files: __magic_name__ = os.path.join(A_, """__init__.py""" ) __magic_name__ = parse_init(A_ ) if objects is not None: __magic_name__ = analyze_results(*A_ ) if len(A_ ) > 0: __magic_name__ = f'''Problem in {fname}, both halves do not define the same objects.\n{errors[0]}''' failures.append("""\n""".join(A_ ) ) if len(A_ ) > 0: raise ValueError("""\n\n""".join(A_ ) ) def a__ ( ): '''simple docstring''' __magic_name__ = [] for path, directories, files in os.walk(A_ ): for folder in directories: # Ignore private modules if folder.startswith("""_""" ): directories.remove(A_ ) continue # Ignore leftovers from branches (empty folders apart from pycache) if len(list((Path(A_ ) / folder).glob("""*.py""" ) ) ) == 0: continue __magic_name__ = str((Path(A_ ) / folder).relative_to(A_ ) ) __magic_name__ = short_path.replace(os.path.sep, """.""" ) submodules.append(A_ ) for fname in files: if fname == "__init__.py": continue __magic_name__ = str((Path(A_ ) / fname).relative_to(A_ ) ) __magic_name__ = short_path.replace(""".py""", """""" ).replace(os.path.sep, """.""" ) if len(submodule.split(""".""" ) ) == 1: submodules.append(A_ ) return submodules __lowerCAmelCase : Dict = [ 'convert_pytorch_checkpoint_to_tf2', 'modeling_flax_pytorch_utils', ] def a__ ( ): '''simple docstring''' __magic_name__ = importlib.util.spec_from_file_location( """transformers""", os.path.join(A_, """__init__.py""" ), submodule_search_locations=[PATH_TO_TRANSFORMERS], ) __magic_name__ = spec.loader.load_module() __magic_name__ = [ module for module in get_transformers_submodules() if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys() ] if len(A_ ) > 0: __magic_name__ = """\n""".join(f'''- {module}''' for module in module_not_registered ) raise ValueError( """The following submodules are not properly registered in the main init of Transformers:\n""" f'''{list_of_modules}\n''' """Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.""" ) if __name__ == "__main__": check_all_inits() check_submodules()
76
0
import unittest from accelerate import debug_launcher from accelerate.test_utils import require_cpu, test_ops, test_script @require_cpu class UpperCAmelCase_ ( unittest.TestCase ): '''simple docstring''' def _lowercase ( self : Union[str, Any] ) -> Tuple: """simple docstring""" debug_launcher(test_script.main ) def _lowercase ( self : Tuple ) -> str: """simple docstring""" debug_launcher(test_ops.main )
709
import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging __lowerCAmelCase : Optional[Any] = logging.get_logger(__name__) __lowerCAmelCase : List[Any] = { 'asapp/sew-d-tiny-100k': 'https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json', # See all SEW-D models at https://huggingface.co/models?filter=sew-d } class UpperCAmelCase_ ( _A ): '''simple docstring''' a__ = """sew-d""" def __init__( self : List[str] , UpperCamelCase__ : Tuple=32 , UpperCamelCase__ : Optional[int]=768 , UpperCamelCase__ : Tuple=12 , UpperCamelCase__ : Optional[Any]=12 , UpperCamelCase__ : int=3072 , UpperCamelCase__ : Tuple=2 , UpperCamelCase__ : List[Any]=512 , UpperCamelCase__ : Any=256 , UpperCamelCase__ : Optional[int]=True , UpperCamelCase__ : Dict=True , UpperCamelCase__ : str=("p2c", "c2p") , UpperCamelCase__ : List[Any]="layer_norm" , UpperCamelCase__ : int="gelu_python" , UpperCamelCase__ : Optional[int]=0.1 , UpperCamelCase__ : Tuple=0.1 , UpperCamelCase__ : List[Any]=0.1 , UpperCamelCase__ : int=0.0 , UpperCamelCase__ : Dict=0.1 , UpperCamelCase__ : List[Any]=0.02 , UpperCamelCase__ : Optional[int]=1E-7 , UpperCamelCase__ : List[Any]=1E-5 , UpperCamelCase__ : List[str]="group" , UpperCamelCase__ : Optional[int]="gelu" , UpperCamelCase__ : Tuple=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512) , UpperCamelCase__ : str=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , UpperCamelCase__ : Optional[Any]=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , UpperCamelCase__ : Optional[Any]=False , UpperCamelCase__ : Optional[int]=128 , UpperCamelCase__ : Tuple=16 , UpperCamelCase__ : Optional[int]=True , UpperCamelCase__ : Dict=0.05 , UpperCamelCase__ : str=10 , UpperCamelCase__ : Tuple=2 , UpperCamelCase__ : Dict=0.0 , UpperCamelCase__ : Dict=10 , UpperCamelCase__ : Union[str, Any]=0 , UpperCamelCase__ : List[Any]="mean" , UpperCamelCase__ : int=False , UpperCamelCase__ : Optional[int]=False , UpperCamelCase__ : Optional[int]=256 , UpperCamelCase__ : List[str]=0 , UpperCamelCase__ : Union[str, Any]=1 , UpperCamelCase__ : List[Any]=2 , **UpperCamelCase__ : str , ) -> Dict: """simple docstring""" super().__init__(**UpperCamelCase__ , pad_token_id=UpperCamelCase__ , bos_token_id=UpperCamelCase__ , eos_token_id=UpperCamelCase__ ) __magic_name__ = hidden_size __magic_name__ = feat_extract_norm __magic_name__ = feat_extract_activation __magic_name__ = list(UpperCamelCase__ ) __magic_name__ = list(UpperCamelCase__ ) __magic_name__ = list(UpperCamelCase__ ) __magic_name__ = conv_bias __magic_name__ = num_conv_pos_embeddings __magic_name__ = num_conv_pos_embedding_groups __magic_name__ = len(self.conv_dim ) __magic_name__ = num_hidden_layers __magic_name__ = intermediate_size __magic_name__ = squeeze_factor __magic_name__ = max_position_embeddings __magic_name__ = position_buckets __magic_name__ = share_att_key __magic_name__ = relative_attention __magic_name__ = norm_rel_ebd __magic_name__ = list(UpperCamelCase__ ) __magic_name__ = hidden_act __magic_name__ = num_attention_heads __magic_name__ = hidden_dropout __magic_name__ = attention_dropout __magic_name__ = activation_dropout __magic_name__ = feat_proj_dropout __magic_name__ = final_dropout __magic_name__ = layer_norm_eps __magic_name__ = feature_layer_norm_eps __magic_name__ = initializer_range __magic_name__ = vocab_size if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( """Configuration for convolutional layers is incorrect.""" """It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,""" F'''but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)''' F'''= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' ) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 __magic_name__ = apply_spec_augment __magic_name__ = mask_time_prob __magic_name__ = mask_time_length __magic_name__ = mask_time_min_masks __magic_name__ = mask_feature_prob __magic_name__ = mask_feature_length __magic_name__ = mask_feature_min_masks # ctc loss __magic_name__ = ctc_loss_reduction __magic_name__ = ctc_zero_infinity # sequence classification __magic_name__ = use_weighted_layer_sum __magic_name__ = classifier_proj_size @property def _lowercase ( self : Union[str, Any] ) -> str: """simple docstring""" return functools.reduce(operator.mul , self.conv_stride , 1 )
76
0
from math import pi, sqrt, tan def a__ ( A_ ): '''simple docstring''' if side_length < 0: raise ValueError("""surface_area_cube() only accepts non-negative values""" ) return 6 * side_length**2 def a__ ( A_, A_, A_ ): '''simple docstring''' if length < 0 or breadth < 0 or height < 0: raise ValueError("""surface_area_cuboid() only accepts non-negative values""" ) return 2 * ((length * breadth) + (breadth * height) + (length * height)) def a__ ( A_ ): '''simple docstring''' if radius < 0: raise ValueError("""surface_area_sphere() only accepts non-negative values""" ) return 4 * pi * radius**2 def a__ ( A_ ): '''simple docstring''' if radius < 0: raise ValueError("""surface_area_hemisphere() only accepts non-negative values""" ) return 3 * pi * radius**2 def a__ ( A_, A_ ): '''simple docstring''' if radius < 0 or height < 0: raise ValueError("""surface_area_cone() only accepts non-negative values""" ) return pi * radius * (radius + (height**2 + radius**2) ** 0.5) def a__ ( A_, A_, A_ ): '''simple docstring''' if radius_a < 0 or radius_a < 0 or height < 0: raise ValueError( """surface_area_conical_frustum() only accepts non-negative values""" ) __magic_name__ = (height**2 + (radius_a - radius_a) ** 2) ** 0.5 return pi * ((slant_height * (radius_a + radius_a)) + radius_a**2 + radius_a**2) def a__ ( A_, A_ ): '''simple docstring''' if radius < 0 or height < 0: raise ValueError("""surface_area_cylinder() only accepts non-negative values""" ) return 2 * pi * radius * (height + radius) def a__ ( A_, A_ ): '''simple docstring''' if torus_radius < 0 or tube_radius < 0: raise ValueError("""surface_area_torus() only accepts non-negative values""" ) if torus_radius < tube_radius: raise ValueError( """surface_area_torus() does not support spindle or self intersecting tori""" ) return 4 * pow(A_, 2 ) * torus_radius * tube_radius def a__ ( A_, A_ ): '''simple docstring''' if length < 0 or width < 0: raise ValueError("""area_rectangle() only accepts non-negative values""" ) return length * width def a__ ( A_ ): '''simple docstring''' if side_length < 0: raise ValueError("""area_square() only accepts non-negative values""" ) return side_length**2 def a__ ( A_, A_ ): '''simple docstring''' if base < 0 or height < 0: raise ValueError("""area_triangle() only accepts non-negative values""" ) return (base * height) / 2 def a__ ( A_, A_, A_ ): '''simple docstring''' if sidea < 0 or sidea < 0 or sidea < 0: raise ValueError("""area_triangle_three_sides() only accepts non-negative values""" ) elif sidea + sidea < sidea or sidea + sidea < sidea or sidea + sidea < sidea: raise ValueError("""Given three sides do not form a triangle""" ) __magic_name__ = (sidea + sidea + sidea) / 2 __magic_name__ = sqrt( semi_perimeter * (semi_perimeter - sidea) * (semi_perimeter - sidea) * (semi_perimeter - sidea) ) return area def a__ ( A_, A_ ): '''simple docstring''' if base < 0 or height < 0: raise ValueError("""area_parallelogram() only accepts non-negative values""" ) return base * height def a__ ( A_, A_, A_ ): '''simple docstring''' if basea < 0 or basea < 0 or height < 0: raise ValueError("""area_trapezium() only accepts non-negative values""" ) return 1 / 2 * (basea + basea) * height def a__ ( A_ ): '''simple docstring''' if radius < 0: raise ValueError("""area_circle() only accepts non-negative values""" ) return pi * radius**2 def a__ ( A_, A_ ): '''simple docstring''' if radius_x < 0 or radius_y < 0: raise ValueError("""area_ellipse() only accepts non-negative values""" ) return pi * radius_x * radius_y def a__ ( A_, A_ ): '''simple docstring''' if diagonal_a < 0 or diagonal_a < 0: raise ValueError("""area_rhombus() only accepts non-negative values""" ) return 1 / 2 * diagonal_a * diagonal_a def a__ ( A_, A_ ): '''simple docstring''' if not isinstance(A_, A_ ) or sides < 3: raise ValueError( """area_reg_polygon() only accepts integers greater than or \ equal to three as number of sides""" ) elif length < 0: raise ValueError( """area_reg_polygon() only accepts non-negative values as \ length of a side""" ) return (sides * length**2) / (4 * tan(pi / sides )) return (sides * length**2) / (4 * tan(pi / sides )) if __name__ == "__main__": import doctest doctest.testmod(verbose=True) # verbose so we can see methods missing tests print('[DEMO] Areas of various geometric shapes: \n') print(f'''Rectangle: {area_rectangle(10, 20) = }''') print(f'''Square: {area_square(10) = }''') print(f'''Triangle: {area_triangle(10, 10) = }''') print(f'''Triangle: {area_triangle_three_sides(5, 12, 13) = }''') print(f'''Parallelogram: {area_parallelogram(10, 20) = }''') print(f'''Rhombus: {area_rhombus(10, 20) = }''') print(f'''Trapezium: {area_trapezium(10, 20, 30) = }''') print(f'''Circle: {area_circle(20) = }''') print(f'''Ellipse: {area_ellipse(10, 20) = }''') print('\nSurface Areas of various geometric shapes: \n') print(f'''Cube: {surface_area_cube(20) = }''') print(f'''Cuboid: {surface_area_cuboid(10, 20, 30) = }''') print(f'''Sphere: {surface_area_sphere(20) = }''') print(f'''Hemisphere: {surface_area_hemisphere(20) = }''') print(f'''Cone: {surface_area_cone(10, 20) = }''') print(f'''Conical Frustum: {surface_area_conical_frustum(10, 20, 30) = }''') print(f'''Cylinder: {surface_area_cylinder(10, 20) = }''') print(f'''Torus: {surface_area_torus(20, 10) = }''') print(f'''Equilateral Triangle: {area_reg_polygon(3, 10) = }''') print(f'''Square: {area_reg_polygon(4, 10) = }''') print(f'''Reqular Pentagon: {area_reg_polygon(5, 10) = }''')
710
import math import random def a__ ( A_, A_ = False ): '''simple docstring''' if deriv: return value * (1 - value) return 1 / (1 + math.exp(-value )) # Initial Value __lowerCAmelCase : Union[str, Any] = 0.02 def a__ ( A_, A_ ): '''simple docstring''' __magic_name__ = float(2 * (random.randint(1, 100 )) - 1 ) for _ in range(A_ ): # Forward propagation __magic_name__ = sigmoid_function(INITIAL_VALUE * weight ) # How much did we miss? __magic_name__ = (expected / 100) - layer_a # Error delta __magic_name__ = layer_1_error * sigmoid_function(A_, A_ ) # Update weight weight += INITIAL_VALUE * layer_1_delta return layer_a * 100 if __name__ == "__main__": import doctest doctest.testmod() __lowerCAmelCase : List[Any] = int(input('Expected value: ')) __lowerCAmelCase : Tuple = int(input('Number of propagations: ')) print(forward_propagation(expected, number_propagations))
76
0
from math import pi, sqrt def a__ ( A_ ): '''simple docstring''' if num <= 0: raise ValueError("""math domain error""" ) if num > 171.5: raise OverflowError("""math range error""" ) elif num - int(A_ ) not in (0, 0.5): raise NotImplementedError("""num must be an integer or a half-integer""" ) elif num == 0.5: return sqrt(A_ ) else: return 1.0 if num == 1 else (num - 1) * gamma(num - 1 ) def a__ ( ): '''simple docstring''' assert gamma(0.5 ) == sqrt(A_ ) assert gamma(1 ) == 1.0 assert gamma(2 ) == 1.0 if __name__ == "__main__": from doctest import testmod testmod() __lowerCAmelCase : Any = 1.0 while num: __lowerCAmelCase : Optional[Any] = float(input('Gamma of: ')) print(F'''gamma({num}) = {gamma(num)}''') print('\nEnter 0 to exit...')
711
import os import sys __lowerCAmelCase : Optional[Any] = os.path.join(os.path.dirname(__file__), 'src') sys.path.append(SRC_DIR) from transformers import ( AutoConfig, AutoModel, AutoModelForCausalLM, AutoModelForMaskedLM, AutoModelForQuestionAnswering, AutoModelForSequenceClassification, AutoTokenizer, add_start_docstrings, ) __lowerCAmelCase : Union[str, Any] = [ 'torch', 'numpy', 'tokenizers', 'filelock', 'requests', 'tqdm', 'regex', 'sentencepiece', 'sacremoses', 'importlib_metadata', 'huggingface_hub', ] @add_start_docstrings(AutoConfig.__doc__ ) def a__ ( *A_, **A_ ): '''simple docstring''' return AutoConfig.from_pretrained(*A_, **A_ ) @add_start_docstrings(AutoTokenizer.__doc__ ) def a__ ( *A_, **A_ ): '''simple docstring''' return AutoTokenizer.from_pretrained(*A_, **A_ ) @add_start_docstrings(AutoModel.__doc__ ) def a__ ( *A_, **A_ ): '''simple docstring''' return AutoModel.from_pretrained(*A_, **A_ ) @add_start_docstrings(AutoModelForCausalLM.__doc__ ) def a__ ( *A_, **A_ ): '''simple docstring''' return AutoModelForCausalLM.from_pretrained(*A_, **A_ ) @add_start_docstrings(AutoModelForMaskedLM.__doc__ ) def a__ ( *A_, **A_ ): '''simple docstring''' return AutoModelForMaskedLM.from_pretrained(*A_, **A_ ) @add_start_docstrings(AutoModelForSequenceClassification.__doc__ ) def a__ ( *A_, **A_ ): '''simple docstring''' return AutoModelForSequenceClassification.from_pretrained(*A_, **A_ ) @add_start_docstrings(AutoModelForQuestionAnswering.__doc__ ) def a__ ( *A_, **A_ ): '''simple docstring''' return AutoModelForQuestionAnswering.from_pretrained(*A_, **A_ )
76
0
'''simple docstring''' import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor from transformers.utils import logging logging.set_verbosity_info() __lowerCAmelCase : str = logging.get_logger(__name__) def a__ ( A_ ): '''simple docstring''' __magic_name__ = YolosConfig() # size of the architecture if "yolos_ti" in yolos_name: __magic_name__ = 192 __magic_name__ = 768 __magic_name__ = 12 __magic_name__ = 3 __magic_name__ = [800, 1333] __magic_name__ = False elif yolos_name == "yolos_s_dWr": __magic_name__ = 330 __magic_name__ = 14 __magic_name__ = 6 __magic_name__ = 1320 elif "yolos_s" in yolos_name: __magic_name__ = 384 __magic_name__ = 1536 __magic_name__ = 12 __magic_name__ = 6 elif "yolos_b" in yolos_name: __magic_name__ = [800, 1344] __magic_name__ = 91 __magic_name__ = """huggingface/label-files""" __magic_name__ = """coco-detection-id2label.json""" __magic_name__ = json.load(open(hf_hub_download(A_, A_, repo_type="""dataset""" ), """r""" ) ) __magic_name__ = {int(A_ ): v for k, v in idalabel.items()} __magic_name__ = idalabel __magic_name__ = {v: k for k, v in idalabel.items()} return config def a__ ( A_, A_, A_ = False ): '''simple docstring''' for i in range(config.num_hidden_layers ): # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) __magic_name__ = state_dict.pop(f'''blocks.{i}.attn.qkv.weight''' ) __magic_name__ = state_dict.pop(f'''blocks.{i}.attn.qkv.bias''' ) # next, add query, keys and values (in that order) to the state dict __magic_name__ = in_proj_weight[: config.hidden_size, :] __magic_name__ = in_proj_bias[: config.hidden_size] __magic_name__ = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] __magic_name__ = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] __magic_name__ = in_proj_weight[-config.hidden_size :, :] __magic_name__ = in_proj_bias[-config.hidden_size :] def a__ ( A_ ): '''simple docstring''' if "backbone" in name: __magic_name__ = name.replace("""backbone""", """vit""" ) if "cls_token" in name: __magic_name__ = name.replace("""cls_token""", """embeddings.cls_token""" ) if "det_token" in name: __magic_name__ = name.replace("""det_token""", """embeddings.detection_tokens""" ) if "mid_pos_embed" in name: __magic_name__ = name.replace("""mid_pos_embed""", """encoder.mid_position_embeddings""" ) if "pos_embed" in name: __magic_name__ = name.replace("""pos_embed""", """embeddings.position_embeddings""" ) if "patch_embed.proj" in name: __magic_name__ = name.replace("""patch_embed.proj""", """embeddings.patch_embeddings.projection""" ) if "blocks" in name: __magic_name__ = name.replace("""blocks""", """encoder.layer""" ) if "attn.proj" in name: __magic_name__ = name.replace("""attn.proj""", """attention.output.dense""" ) if "attn" in name: __magic_name__ = name.replace("""attn""", """attention.self""" ) if "norm1" in name: __magic_name__ = name.replace("""norm1""", """layernorm_before""" ) if "norm2" in name: __magic_name__ = name.replace("""norm2""", """layernorm_after""" ) if "mlp.fc1" in name: __magic_name__ = name.replace("""mlp.fc1""", """intermediate.dense""" ) if "mlp.fc2" in name: __magic_name__ = name.replace("""mlp.fc2""", """output.dense""" ) if "class_embed" in name: __magic_name__ = name.replace("""class_embed""", """class_labels_classifier""" ) if "bbox_embed" in name: __magic_name__ = name.replace("""bbox_embed""", """bbox_predictor""" ) if "vit.norm" in name: __magic_name__ = name.replace("""vit.norm""", """vit.layernorm""" ) return name def a__ ( A_, A_ ): '''simple docstring''' for key in orig_state_dict.copy().keys(): __magic_name__ = orig_state_dict.pop(A_ ) if "qkv" in key: __magic_name__ = key.split(""".""" ) __magic_name__ = int(key_split[2] ) __magic_name__ = model.vit.encoder.layer[layer_num].attention.attention.all_head_size if "weight" in key: __magic_name__ = val[:dim, :] __magic_name__ = val[ dim : dim * 2, : ] __magic_name__ = val[-dim:, :] else: __magic_name__ = val[:dim] __magic_name__ = val[dim : dim * 2] __magic_name__ = val[-dim:] else: __magic_name__ = val return orig_state_dict def a__ ( ): '''simple docstring''' __magic_name__ = """http://images.cocodataset.org/val2017/000000039769.jpg""" __magic_name__ = Image.open(requests.get(A_, stream=A_ ).raw ) return im @torch.no_grad() def a__ ( A_, A_, A_, A_ = False ): '''simple docstring''' __magic_name__ = get_yolos_config(A_ ) # load original state_dict __magic_name__ = torch.load(A_, map_location="""cpu""" )["""model"""] # load 🤗 model __magic_name__ = YolosForObjectDetection(A_ ) model.eval() __magic_name__ = convert_state_dict(A_, A_ ) model.load_state_dict(A_ ) # Check outputs on an image, prepared by YolosImageProcessor __magic_name__ = 800 if yolos_name != """yolos_ti""" else 512 __magic_name__ = YolosImageProcessor(format="""coco_detection""", size=A_ ) __magic_name__ = image_processor(images=prepare_img(), return_tensors="""pt""" ) __magic_name__ = model(**A_ ) __magic_name__ , __magic_name__ = outputs.logits, outputs.pred_boxes __magic_name__ , __magic_name__ = None, None if yolos_name == "yolos_ti": __magic_name__ = torch.tensor( [[-39.5022, -11.9820, -17.6888], [-29.9574, -9.9769, -17.7691], [-42.3281, -20.7200, -30.6294]] ) __magic_name__ = torch.tensor( [[0.4021, 0.0836, 0.7979], [0.0184, 0.2609, 0.0364], [0.1781, 0.2004, 0.2095]] ) elif yolos_name == "yolos_s_200_pre": __magic_name__ = torch.tensor( [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]] ) __magic_name__ = torch.tensor( [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]] ) elif yolos_name == "yolos_s_300_pre": __magic_name__ = torch.tensor( [[-36.2220, -14.4385, -23.5457], [-35.6970, -14.7583, -21.3935], [-31.5939, -13.6042, -16.8049]] ) __magic_name__ = torch.tensor( [[0.7614, 0.2316, 0.4728], [0.7168, 0.4495, 0.3855], [0.4996, 0.1466, 0.9996]] ) elif yolos_name == "yolos_s_dWr": __magic_name__ = torch.tensor( [[-42.8668, -24.1049, -41.1690], [-34.7456, -14.1274, -24.9194], [-33.7898, -12.1946, -25.6495]] ) __magic_name__ = torch.tensor( [[0.5587, 0.2773, 0.0605], [0.5004, 0.3014, 0.9994], [0.4999, 0.1548, 0.9994]] ) elif yolos_name == "yolos_base": __magic_name__ = torch.tensor( [[-40.6064, -24.3084, -32.6447], [-55.1990, -30.7719, -35.5877], [-51.4311, -33.3507, -35.6462]] ) __magic_name__ = torch.tensor( [[0.5555, 0.2794, 0.0655], [0.9049, 0.2664, 0.1894], [0.9183, 0.1984, 0.1635]] ) else: raise ValueError(f'''Unknown yolos_name: {yolos_name}''' ) assert torch.allclose(logits[0, :3, :3], A_, atol=1e-4 ) assert torch.allclose(pred_boxes[0, :3, :3], A_, atol=1e-4 ) Path(A_ ).mkdir(exist_ok=A_ ) print(f'''Saving model {yolos_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(A_ ) print(f'''Saving image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(A_ ) if push_to_hub: __magic_name__ = { """yolos_ti""": """yolos-tiny""", """yolos_s_200_pre""": """yolos-small""", """yolos_s_300_pre""": """yolos-small-300""", """yolos_s_dWr""": """yolos-small-dwr""", """yolos_base""": """yolos-base""", } print("""Pushing to the hub...""" ) __magic_name__ = model_mapping[yolos_name] image_processor.push_to_hub(A_, organization="""hustvl""" ) model.push_to_hub(A_, organization="""hustvl""" ) if __name__ == "__main__": __lowerCAmelCase : Optional[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( '--yolos_name', default='yolos_s_200_pre', type=str, help=( 'Name of the YOLOS model you\'d like to convert. Should be one of \'yolos_ti\', \'yolos_s_200_pre\',' ' \'yolos_s_300_pre\', \'yolos_s_dWr\', \'yolos_base\'.' ), ) parser.add_argument( '--checkpoint_path', default=None, type=str, help='Path to the original state dict (.pth file).' ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.' ) parser.add_argument( '--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.' ) __lowerCAmelCase : Tuple = parser.parse_args() convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
712
from typing import Dict from .base import GenericTensor, Pipeline class UpperCAmelCase_ ( _A ): '''simple docstring''' def _lowercase ( self : List[Any] , UpperCamelCase__ : List[Any]=None , UpperCamelCase__ : Union[str, Any]=None , UpperCamelCase__ : Any=None , **UpperCamelCase__ : Dict ) -> str: """simple docstring""" if tokenize_kwargs is None: __magic_name__ = {} if truncation is not None: if "truncation" in tokenize_kwargs: raise ValueError( """truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)""" ) __magic_name__ = truncation __magic_name__ = tokenize_kwargs __magic_name__ = {} if return_tensors is not None: __magic_name__ = return_tensors return preprocess_params, {}, postprocess_params def _lowercase ( self : int , UpperCamelCase__ : int , **UpperCamelCase__ : int ) -> Dict[str, GenericTensor]: """simple docstring""" __magic_name__ = self.framework __magic_name__ = self.tokenizer(UpperCamelCase__ , return_tensors=UpperCamelCase__ , **UpperCamelCase__ ) return model_inputs def _lowercase ( self : str , UpperCamelCase__ : Dict ) -> str: """simple docstring""" __magic_name__ = self.model(**UpperCamelCase__ ) return model_outputs def _lowercase ( self : Union[str, Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : List[str]=False ) -> List[str]: """simple docstring""" if return_tensors: return model_outputs[0] if self.framework == "pt": return model_outputs[0].tolist() elif self.framework == "tf": return model_outputs[0].numpy().tolist() def __call__( self : List[str] , *UpperCamelCase__ : List[Any] , **UpperCamelCase__ : List[Any] ) -> Dict: """simple docstring""" return super().__call__(*UpperCamelCase__ , **UpperCamelCase__ )
76
0
import argparse import json import os import re import shutil import torch from transformers import BioGptConfig, BioGptForCausalLM from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE from transformers.utils import WEIGHTS_NAME, logging logging.set_verbosity_warning() __lowerCAmelCase : Tuple = 2 class UpperCAmelCase_ : '''simple docstring''' def __init__( self : int , *, # begin keyword-only arguments UpperCamelCase__ : Union[str, Any]="<s>" , UpperCamelCase__ : Tuple="<pad>" , UpperCamelCase__ : List[str]="</s>" , UpperCamelCase__ : int="<unk>" , UpperCamelCase__ : int=None , ) -> Any: """simple docstring""" __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ = bos, unk, pad, eos __magic_name__ = [] __magic_name__ = [] __magic_name__ = {} __magic_name__ = self.add_symbol(UpperCamelCase__ ) __magic_name__ = self.add_symbol(UpperCamelCase__ ) __magic_name__ = self.add_symbol(UpperCamelCase__ ) __magic_name__ = self.add_symbol(UpperCamelCase__ ) if extra_special_symbols: for s in extra_special_symbols: self.add_symbol(UpperCamelCase__ ) __magic_name__ = len(self.symbols ) def __eq__( self : Dict , UpperCamelCase__ : Union[str, Any] ) -> Optional[int]: """simple docstring""" return self.indices == other.indices def __getitem__( self : Dict , UpperCamelCase__ : Tuple ) -> Dict: """simple docstring""" if idx < len(self.symbols ): return self.symbols[idx] return self.unk_word def __len__( self : Optional[Any] ) -> List[Any]: """simple docstring""" return len(self.symbols ) def __contains__( self : Tuple , UpperCamelCase__ : str ) -> Optional[Any]: """simple docstring""" return sym in self.indices @classmethod def _lowercase ( cls : str , UpperCamelCase__ : Any ) -> Union[str, Any]: """simple docstring""" __magic_name__ = cls() d.add_from_file(UpperCamelCase__ ) return d def _lowercase ( self : Optional[int] , UpperCamelCase__ : str , UpperCamelCase__ : List[Any]=1 , UpperCamelCase__ : Any=False ) -> Union[str, Any]: """simple docstring""" if word in self.indices and not overwrite: __magic_name__ = self.indices[word] __magic_name__ = self.count[idx] + n return idx else: __magic_name__ = len(self.symbols ) __magic_name__ = idx self.symbols.append(UpperCamelCase__ ) self.count.append(UpperCamelCase__ ) return idx def _lowercase ( self : Union[str, Any] , UpperCamelCase__ : int ) -> List[str]: """simple docstring""" return 0 def _lowercase ( self : str , UpperCamelCase__ : Dict ) -> List[str]: """simple docstring""" if isinstance(UpperCamelCase__ , UpperCamelCase__ ): try: with open(UpperCamelCase__ , """r""" , encoding="""utf-8""" ) as fd: self.add_from_file(UpperCamelCase__ ) except FileNotFoundError as fnfe: raise fnfe except UnicodeError: raise Exception("""Incorrect encoding detected in {}, please rebuild the dataset""".format(UpperCamelCase__ ) ) return __magic_name__ = f.readlines() __magic_name__ = self._load_meta(UpperCamelCase__ ) for line in lines[indices_start_line:]: try: __magic_name__ , __magic_name__ = line.rstrip().rsplit(""" """ , 1 ) if field == "#fairseq:overwrite": __magic_name__ = True __magic_name__ , __magic_name__ = line.rsplit(""" """ , 1 ) else: __magic_name__ = False __magic_name__ = int(UpperCamelCase__ ) __magic_name__ = line if word in self and not overwrite: raise RuntimeError( """Duplicate word found when loading Dictionary: '{}'. """ """Duplicate words can overwrite earlier ones by adding the """ """#fairseq:overwrite flag at the end of the corresponding row """ """in the dictionary file. If using the Camembert model, please """ """download an updated copy of the model file.""".format(UpperCamelCase__ ) ) self.add_symbol(UpperCamelCase__ , n=UpperCamelCase__ , overwrite=UpperCamelCase__ ) except ValueError: raise ValueError("""Incorrect dictionary format, expected '<token> <cnt> [flags]'""" ) def a__ ( A_ ): '''simple docstring''' __magic_name__ = dict((re.sub(R"""@@$""", """""", A_ ), v) if k.endswith("""@@""" ) else (re.sub(R"""$""", """</w>""", A_ ), v) for k, v in d.items() ) __magic_name__ = """<s> <pad> </s> <unk>""".split() # restore the special tokens for k in keep_keys: del da[f'''{k}</w>'''] __magic_name__ = d[k] # restore return da def a__ ( A_, A_ ): '''simple docstring''' if not os.path.exists(A_ ): raise ValueError(f'''path {biogpt_checkpoint_path} does not exist!''' ) os.makedirs(A_, exist_ok=A_ ) print(f'''Writing results to {pytorch_dump_folder_path}''' ) # handle various types of models __magic_name__ = os.path.join(A_, """checkpoint.pt""" ) if not os.path.isfile(A_ ): raise ValueError(f'''path to the file {checkpoint_file} does not exist!''' ) __magic_name__ = torch.load(A_, map_location="""cpu""" ) __magic_name__ = chkpt["""cfg"""]["""model"""] # dicts __magic_name__ = os.path.join(A_, """dict.txt""" ) if not os.path.isfile(A_ ): raise ValueError(f'''path to the file {dict_file} does not exist!''' ) __magic_name__ = Dictionary.load(A_ ) __magic_name__ = rewrite_dict_keys(src_dict.indices ) __magic_name__ = len(A_ ) __magic_name__ = os.path.join(A_, VOCAB_FILES_NAMES["""vocab_file"""] ) print(f'''Generating {src_vocab_file} of {src_vocab_size} records''' ) with open(A_, """w""", encoding="""utf-8""" ) as f: f.write(json.dumps(A_, ensure_ascii=A_, indent=A_ ) ) # merges_file (bpecodes) __magic_name__ = os.path.join(A_, """bpecodes""" ) if not os.path.isfile(A_ ): raise ValueError(f'''path to the file {bpecodes_file} does not exist!''' ) __magic_name__ = os.path.join(A_, VOCAB_FILES_NAMES["""merges_file"""] ) shutil.copyfile(A_, A_ ) # model config __magic_name__ = os.path.join(A_, """config.json""" ) __magic_name__ = { """activation_dropout""": args["""activation_dropout"""], """architectures""": ["""BioGptForCausalLM"""], """attention_probs_dropout_prob""": args["""attention_dropout"""], """bos_token_id""": 0, """eos_token_id""": 2, """hidden_act""": args["""activation_fn"""], """hidden_dropout_prob""": args["""dropout"""], """hidden_size""": args["""decoder_embed_dim"""], """initializer_range""": 0.02, """intermediate_size""": args["""decoder_ffn_embed_dim"""], """layer_norm_eps""": 1e-12, """layerdrop""": args["""decoder_layerdrop"""], """max_position_embeddings""": args["""max_target_positions"""], """model_type""": """biogpt""", """num_attention_heads""": args["""decoder_attention_heads"""], """num_hidden_layers""": args["""decoder_layers"""], """pad_token_id""": 1, """scale_embedding""": not args["""no_scale_embedding"""], """tie_word_embeddings""": args["""share_decoder_input_output_embed"""], """vocab_size""": src_vocab_size, } # good hparam defaults to start with print(f'''Generating {biogpt_model_config_file}''' ) with open(A_, """w""", encoding="""utf-8""" ) as f: f.write(json.dumps(A_, ensure_ascii=A_, indent=A_ ) ) # tokenizer config __magic_name__ = os.path.join(A_, A_ ) __magic_name__ = { """bos_token""": """<s>""", """eos_token""": """</s>""", """model_max_length""": 1024, """pad_token""": """<pad>""", """special_tokens_map_file""": None, """tokenizer_class""": """BioGptTokenizer""", """unk_token""": """<unk>""", } print(f'''Generating {biogpt_tokenizer_config_file}''' ) with open(A_, """w""", encoding="""utf-8""" ) as f: f.write(json.dumps(A_, ensure_ascii=A_, indent=A_ ) ) # model __magic_name__ = chkpt["""model"""] # remove unneeded keys __magic_name__ = [ """decoder.version""", ] for k in ignore_keys: model_state_dict.pop(A_, A_ ) __magic_name__ = list(model_state_dict.keys() ) for layer_name in layer_names: if layer_name.endswith("""output_projection.weight""" ): __magic_name__ = model_state_dict.pop(A_ ) else: __magic_name__ = model_state_dict.pop(A_ ) __magic_name__ = BioGptConfig.from_pretrained(A_ ) __magic_name__ = BioGptForCausalLM(A_ ) # check that it loads ok model_new.load_state_dict(A_ ) # save __magic_name__ = os.path.join(A_, A_ ) print(f'''Generating {pytorch_weights_dump_path}''' ) torch.save(A_, A_ ) print("""Conversion is done!""" ) if __name__ == "__main__": __lowerCAmelCase : str = argparse.ArgumentParser() # Required parameters parser.add_argument( '--biogpt_checkpoint_path', default=None, type=str, required=True, help=( 'Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,' ' bpecodes, etc.' ), ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.' ) __lowerCAmelCase : List[str] = parser.parse_args() convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path)
713
import argparse import math import os from copy import deepcopy import torch from audio_diffusion.models import DiffusionAttnUnetaD from diffusion import sampling from torch import nn from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel __lowerCAmelCase : str = { 'gwf-440k': { 'url': 'https://model-server.zqevans2.workers.dev/gwf-440k.ckpt', 'sample_rate': 48000, 'sample_size': 65536, }, 'jmann-small-190k': { 'url': 'https://model-server.zqevans2.workers.dev/jmann-small-190k.ckpt', 'sample_rate': 48000, 'sample_size': 65536, }, 'jmann-large-580k': { 'url': 'https://model-server.zqevans2.workers.dev/jmann-large-580k.ckpt', 'sample_rate': 48000, 'sample_size': 131072, }, 'maestro-uncond-150k': { 'url': 'https://model-server.zqevans2.workers.dev/maestro-uncond-150k.ckpt', 'sample_rate': 16000, 'sample_size': 65536, }, 'unlocked-uncond-250k': { 'url': 'https://model-server.zqevans2.workers.dev/unlocked-uncond-250k.ckpt', 'sample_rate': 16000, 'sample_size': 65536, }, 'honk-140k': { 'url': 'https://model-server.zqevans2.workers.dev/honk-140k.ckpt', 'sample_rate': 16000, 'sample_size': 65536, }, } def a__ ( A_, A_ ): '''simple docstring''' return torch.atana(A_, A_ ) / math.pi * 2 def a__ ( A_ ): '''simple docstring''' __magic_name__ = torch.sin(t * math.pi / 2 ) ** 2 __magic_name__ = (1 - sigma**2) ** 0.5 return alpha_sigma_to_t(A_, A_ ) class UpperCAmelCase_ ( _A ): '''simple docstring''' pass class UpperCAmelCase_ ( nn.Module ): '''simple docstring''' def __init__( self : Tuple , UpperCamelCase__ : str ) -> Optional[Any]: """simple docstring""" super().__init__() __magic_name__ = DiffusionAttnUnetaD(UpperCamelCase__ , n_attn_layers=4 ) __magic_name__ = deepcopy(self.diffusion ) __magic_name__ = torch.quasirandom.SobolEngine(1 , scramble=UpperCamelCase__ ) def a__ ( A_ ): '''simple docstring''' __magic_name__ = MODELS_MAP[model_name]["""url"""] os.system(f'''wget {url} ./''' ) return f'''./{model_name}.ckpt''' __lowerCAmelCase : Optional[int] = { '1': 'resnets.0', '2': 'attentions.0', '3': 'resnets.1', '4': 'attentions.1', '5': 'resnets.2', '6': 'attentions.2', } __lowerCAmelCase : Optional[Any] = { '8': 'resnets.0', '9': 'attentions.0', '10': 'resnets.1', '11': 'attentions.1', '12': 'resnets.2', '13': 'attentions.2', } __lowerCAmelCase : Union[str, Any] = { '1': 'resnets.0', '2': 'attentions.0', '3': 'resnets.1', '4': 'attentions.1', '5': 'resnets.2', '6': 'attentions.2', '8': 'resnets.3', '9': 'attentions.3', '10': 'resnets.4', '11': 'attentions.4', '12': 'resnets.5', '13': 'attentions.5', } __lowerCAmelCase : int = { '0': 'resnets.0', '1': 'resnets.1', '2': 'resnets.2', '4': 'resnets.0', '5': 'resnets.1', '6': 'resnets.2', } __lowerCAmelCase : List[str] = { 'skip': 'conv_skip', 'main.0': 'conv_1', 'main.1': 'group_norm_1', 'main.3': 'conv_2', 'main.4': 'group_norm_2', } __lowerCAmelCase : int = { 'norm': 'group_norm', 'qkv_proj': ['query', 'key', 'value'], 'out_proj': ['proj_attn'], } def a__ ( A_ ): '''simple docstring''' if name.startswith("""skip""" ): return name.replace("""skip""", RES_CONV_MAP["""skip"""] ) # name has to be of format main.{digit} if not name.startswith("""main.""" ): raise ValueError(f'''ResConvBlock error with {name}''' ) return name.replace(name[:6], RES_CONV_MAP[name[:6]] ) def a__ ( A_ ): '''simple docstring''' for key, value in ATTN_MAP.items(): if name.startswith(A_ ) and not isinstance(A_, A_ ): return name.replace(A_, A_ ) elif name.startswith(A_ ): return [name.replace(A_, A_ ) for v in value] raise ValueError(f'''Attn error with {name}''' ) def a__ ( A_, A_=13 ): '''simple docstring''' __magic_name__ = input_string if string.split(""".""" )[0] == "timestep_embed": return string.replace("""timestep_embed""", """time_proj""" ) __magic_name__ = 0 if string.startswith("""net.3.""" ): depth += 1 __magic_name__ = string[6:] elif string.startswith("""net.""" ): __magic_name__ = string[4:] while string.startswith("""main.7.""" ): depth += 1 __magic_name__ = string[7:] if string.startswith("""main.""" ): __magic_name__ = string[5:] # mid block if string[:2].isdigit(): __magic_name__ = string[:2] __magic_name__ = string[2:] else: __magic_name__ = string[0] __magic_name__ = string[1:] if depth == max_depth: __magic_name__ = MID_NUM_TO_LAYER[layer_num] __magic_name__ = """mid_block""" elif depth > 0 and int(A_ ) < 7: __magic_name__ = DOWN_NUM_TO_LAYER[layer_num] __magic_name__ = f'''down_blocks.{depth}''' elif depth > 0 and int(A_ ) > 7: __magic_name__ = UP_NUM_TO_LAYER[layer_num] __magic_name__ = f'''up_blocks.{max_depth - depth - 1}''' elif depth == 0: __magic_name__ = DEPTH_0_TO_LAYER[layer_num] __magic_name__ = f'''up_blocks.{max_depth - 1}''' if int(A_ ) > 3 else """down_blocks.0""" if not string_left.startswith(""".""" ): raise ValueError(f'''Naming error with {input_string} and string_left: {string_left}.''' ) __magic_name__ = string_left[1:] if "resnets" in new_layer: __magic_name__ = convert_resconv_naming(A_ ) elif "attentions" in new_layer: __magic_name__ = convert_attn_naming(A_ ) __magic_name__ = new_string_left if not isinstance(A_, A_ ): __magic_name__ = prefix + """.""" + new_layer + """.""" + string_left else: __magic_name__ = [prefix + """.""" + new_layer + """.""" + s for s in string_left] return new_string def a__ ( A_ ): '''simple docstring''' __magic_name__ = {} for k, v in state_dict.items(): if k.endswith("""kernel""" ): # up- and downsample layers, don't have trainable weights continue __magic_name__ = rename(A_ ) # check if we need to transform from Conv => Linear for attention if isinstance(A_, A_ ): __magic_name__ = transform_conv_attns(A_, A_, A_ ) else: __magic_name__ = v return new_state_dict def a__ ( A_, A_, A_ ): '''simple docstring''' if len(A_ ) == 1: if len(v.shape ) == 3: # weight __magic_name__ = v[:, :, 0] else: # bias __magic_name__ = v else: # qkv matrices __magic_name__ = v.shape[0] __magic_name__ = trippled_shape // 3 for i in range(3 ): if len(v.shape ) == 3: __magic_name__ = v[i * single_shape : (i + 1) * single_shape, :, 0] else: __magic_name__ = v[i * single_shape : (i + 1) * single_shape] return new_state_dict def a__ ( A_ ): '''simple docstring''' __magic_name__ = torch.device("""cuda""" if torch.cuda.is_available() else """cpu""" ) __magic_name__ = args.model_path.split("""/""" )[-1].split(""".""" )[0] if not os.path.isfile(args.model_path ): assert ( model_name == args.model_path ), f'''Make sure to provide one of the official model names {MODELS_MAP.keys()}''' __magic_name__ = download(A_ ) __magic_name__ = MODELS_MAP[model_name]["""sample_rate"""] __magic_name__ = MODELS_MAP[model_name]["""sample_size"""] __magic_name__ = Object() __magic_name__ = sample_size __magic_name__ = sample_rate __magic_name__ = 0 __magic_name__ = UNetaDModel(sample_size=A_, sample_rate=A_ ) __magic_name__ = diffusers_model.state_dict() __magic_name__ = DiffusionUncond(A_ ) orig_model.load_state_dict(torch.load(args.model_path, map_location=A_ )["""state_dict"""] ) __magic_name__ = orig_model.diffusion_ema.eval() __magic_name__ = orig_model.state_dict() __magic_name__ = rename_orig_weights(A_ ) __magic_name__ = set(renamed_state_dict.keys() ) - set(diffusers_state_dict.keys() ) __magic_name__ = set(diffusers_state_dict.keys() ) - set(renamed_state_dict.keys() ) assert len(A_ ) == 0, f'''Problem with {renamed_minus_diffusers}''' assert all(k.endswith("""kernel""" ) for k in list(A_ ) ), f'''Problem with {diffusers_minus_renamed}''' for key, value in renamed_state_dict.items(): assert ( diffusers_state_dict[key].squeeze().shape == value.squeeze().shape ), f'''Shape for {key} doesn\'t match. Diffusers: {diffusers_state_dict[key].shape} vs. {value.shape}''' if key == "time_proj.weight": __magic_name__ = value.squeeze() __magic_name__ = value diffusers_model.load_state_dict(A_ ) __magic_name__ = 100 __magic_name__ = 33 __magic_name__ = IPNDMScheduler(num_train_timesteps=A_ ) __magic_name__ = torch.manual_seed(A_ ) __magic_name__ = torch.randn([1, 2, config.sample_size], generator=A_ ).to(A_ ) __magic_name__ = torch.linspace(1, 0, steps + 1, device=A_ )[:-1] __magic_name__ = get_crash_schedule(A_ ) __magic_name__ = DanceDiffusionPipeline(unet=A_, scheduler=A_ ) __magic_name__ = torch.manual_seed(33 ) __magic_name__ = pipe(num_inference_steps=A_, generator=A_ ).audios __magic_name__ = sampling.iplms_sample(A_, A_, A_, {} ) __magic_name__ = generated.clamp(-1, 1 ) __magic_name__ = (generated - audio).abs().sum() __magic_name__ = (generated - audio).abs().max() if args.save: pipe.save_pretrained(args.checkpoint_path ) print("""Diff sum""", A_ ) print("""Diff max""", A_ ) assert diff_max < 1e-3, f'''Diff max: {diff_max} is too much :-/''' print(f'''Conversion for {model_name} successful!''' ) if __name__ == "__main__": __lowerCAmelCase : Optional[int] = argparse.ArgumentParser() parser.add_argument('--model_path', default=None, type=str, required=True, help='Path to the model to convert.') parser.add_argument( '--save', default=True, type=bool, required=False, help='Whether to save the converted model or not.' ) parser.add_argument('--checkpoint_path', default=None, type=str, required=True, help='Path to the output model.') __lowerCAmelCase : Union[str, Any] = parser.parse_args() main(args)
76
0
import argparse import dataclasses import json import logging import os import shutil from typing import List, Optional import datasets from accelerate import Accelerator from datasets import load_dataset from finetuning import finetune from tqdm.auto import tqdm import transformers from transformers import AutoConfig, set_seed from transformers.trainer_utils import IntervalStrategy __lowerCAmelCase : Any = logging.getLogger(__name__) __lowerCAmelCase : List[Any] = 'pytorch_model.bin' @dataclasses.dataclass class UpperCAmelCase_ : '''simple docstring''' a__ = dataclasses.field( metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models."""} ) a__ = dataclasses.field( default=_A , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co."""} , ) @dataclasses.dataclass class UpperCAmelCase_ : '''simple docstring''' a__ = dataclasses.field(metadata={"""help""": """A csv or a json file containing the training data."""} ) a__ = dataclasses.field(metadata={"""help""": """A csv or a json file containing the data to predict on."""} ) a__ = dataclasses.field( default=_A , metadata={"""help""": """A csv or a json file containing the validation data."""} ) a__ = dataclasses.field( default=_A , metadata={"""help""": """The name of the task to train on."""} , ) a__ = dataclasses.field( default=_A , metadata={"""help""": """The list of labels for the task."""} ) @dataclasses.dataclass class UpperCAmelCase_ : '''simple docstring''' a__ = dataclasses.field( metadata={"""help""": """The output directory where the model predictions and checkpoints will be written."""} ) a__ = dataclasses.field( default="""accuracy""" , metadata={"""help""": """The evaluation metric used for the task."""} ) a__ = dataclasses.field( default="""no""" , metadata={ """help""": """The evaluation strategy to adopt during training. Possible values are: [\"no\", \"step\", \"epoch]""" } , ) a__ = dataclasses.field( default=10 , metadata={"""help""": """Number of evaluation calls with no improvement after which training will be stopped."""} , ) a__ = dataclasses.field( default=0.0 , metadata={ """help""": """How much the specified evaluation metric must improve to satisfy early stopping conditions.""" } , ) a__ = dataclasses.field( default=_A , metadata={"""help""": """Whether to filter the pseudo-labeled data based on the confidence score."""} , ) a__ = dataclasses.field( default=_A , metadata={"""help""": """Whether to filter the pseudo-labeled data based on the validation performance."""} , ) a__ = dataclasses.field( default=_A , metadata={"""help""": """Whether to fine-tune on labeled data after pseudo training."""} , ) a__ = dataclasses.field( default=0.0 , metadata={"""help""": """Confidence threshold for pseudo-labeled data filtering."""} , ) a__ = dataclasses.field( default=1_00 , metadata={"""help""": """Number of evaluation calls with no improvement after which training will be stopped."""} , ) a__ = dataclasses.field( default=_A , metadata={"""help""": """Random seed for initialization."""} , ) def a__ ( A_, A_, A_, A_, A_, A_ ): '''simple docstring''' __magic_name__ = datasets.concatenate_datasets([infer_input, infer_output], axis=1 ) if args.do_filter_by_confidence: __magic_name__ = dataset.filter(lambda A_ : example["probability"] > args.confidence_threshold ) if args.do_filter_by_val_performance: assert eval_result >= 0.0 and eval_result <= 1.0 __magic_name__ = int(eval_result * len(A_ ) ) print(A_ ) __magic_name__ = dataset.sort("""probability""", reverse=A_ ) __magic_name__ = dataset.select(range(A_ ) ) __magic_name__ = dataset.remove_columns(["""label""", """probability"""] ) __magic_name__ = dataset.rename_column("""prediction""", """label""" ) __magic_name__ = dataset.map(lambda A_ : {"label": idalabel[example["label"]]} ) __magic_name__ = dataset.shuffle(seed=args.seed ) __magic_name__ = os.path.join(A_, f'''train_pseudo.{args.data_file_extension}''' ) if args.data_file_extension == "csv": dataset.to_csv(A_, index=A_ ) else: dataset.to_json(A_ ) def a__ ( A_, A_, A_, A_, **A_ ): '''simple docstring''' __magic_name__ = Accelerator() # Make one log on every process with the configuration for debugging. logging.basicConfig( format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", level=logging.INFO, ) logger.info(accelerator.state ) # Setup logging, we only want one process per machine to log things on the # screen. accelerator.is_local_main_process is only True for one process per # machine. logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR ) if accelerator.is_local_main_process: datasets.utils.logging.set_verbosity_warning() transformers.utils.logging.set_verbosity_info() else: datasets.utils.logging.set_verbosity_error() transformers.utils.logging.set_verbosity_error() __magic_name__ = STModelArguments(model_name_or_path=A_ ) __magic_name__ = STDataArguments(train_file=A_, infer_file=A_ ) __magic_name__ = STTrainingArguments(output_dir=A_ ) __magic_name__ = argparse.Namespace() for arg_class in (model_args, data_args, training_args): for key, value in vars(A_ ).items(): setattr(A_, A_, A_ ) for key, value in kwargs.items(): if hasattr(A_, A_ ): setattr(A_, A_, A_ ) # Sanity checks __magic_name__ = {} __magic_name__ = None # You need to provide the training data and the data to predict on assert args.train_file is not None assert args.infer_file is not None __magic_name__ = args.train_file __magic_name__ = args.infer_file if args.evaluation_strategy != IntervalStrategy.NO.value: assert args.eval_file is not None __magic_name__ = args.eval_file for key in data_files: __magic_name__ = data_files[key].split(""".""" )[-1] assert extension in ["csv", "json"], f'''`{key}_file` should be a csv or a json file.''' if args.data_file_extension is None: __magic_name__ = extension else: assert extension == args.data_file_extension, f'''`{key}_file` should be a {args.data_file_extension} file`.''' assert ( args.eval_metric in datasets.list_metrics() ), f'''{args.eval_metric} not in the list of supported metrics {datasets.list_metrics()}.''' # If passed along, set the training seed now. if args.seed is not None: set_seed(args.seed ) logger.info("""Creating the initial data directory for self-training...""" ) __magic_name__ = f'''{args.output_dir}/self-train_iter-{{}}'''.format __magic_name__ = data_dir_format(0 ) if accelerator.is_main_process: if args.output_dir is not None: os.makedirs(args.output_dir, exist_ok=A_ ) os.makedirs(A_, exist_ok=A_ ) accelerator.wait_for_everyone() __magic_name__ = None __magic_name__ = None __magic_name__ = 0 __magic_name__ = False # Show the progress bar __magic_name__ = tqdm(range(args.max_selftrain_iterations ), disable=not accelerator.is_local_main_process ) # Self-train for iteration in range(0, int(args.max_selftrain_iterations ) ): __magic_name__ = data_dir_format(A_ ) assert os.path.exists(A_ ) # Stage 1: initial fine-tuning for iteration = 0 or pseudo-training for # iteration > 0 __magic_name__ = os.path.join(A_, """stage-1""" ) __magic_name__ = { """accelerator""": accelerator, """model_name_or_path""": args.model_name_or_path, """cache_dir""": args.cache_dir, """do_train""": True, """train_file""": data_files["""train"""] if iteration == 0 else data_files["""train_pseudo"""], """do_eval""": True if args.eval_file is not None else False, """eval_file""": data_files["""eval"""], """do_predict""": True, """infer_file""": data_files["""infer"""], """task_name""": args.task_name, """label_list""": args.label_list, """output_dir""": current_output_dir, """eval_metric""": args.eval_metric, """evaluation_strategy""": args.evaluation_strategy, """early_stopping_patience""": args.early_stopping_patience, """early_stopping_threshold""": args.early_stopping_threshold, """seed""": args.seed, } # Add additional training arguments for key, value in kwargs.items(): if key not in arguments_dict and not hasattr(A_, A_ ): arguments_dict.update({key: value} ) __magic_name__ = os.path.join(A_, """best-checkpoint""", A_ ) if os.path.exists(A_ ): logger.info( """Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 1.""", A_, A_, ) else: logger.info("""***** Running self-training: iteration: %d, stage: 1 *****""", A_ ) finetune(**A_ ) accelerator.wait_for_everyone() assert os.path.exists(A_ ) logger.info("""Self-training job completed: iteration: %d, stage: 1.""", A_ ) if iteration > 0 and args.finetune_on_labeled_data: # Stage 2 (optional): fine-tuning on the original labeled data __magic_name__ = os.path.join(A_, """best-checkpoint""" ) __magic_name__ = os.path.join(A_, """stage-2""" ) # Update arguments_dict __magic_name__ = model_path __magic_name__ = data_files["""train"""] __magic_name__ = current_output_dir __magic_name__ = os.path.join(A_, """best-checkpoint""", A_ ) if os.path.exists(A_ ): logger.info( """Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 2.""", A_, A_, ) else: logger.info("""***** Running self-training: iteration: %d, stage: 2 *****""", A_ ) finetune(**A_ ) accelerator.wait_for_everyone() assert os.path.exists(A_ ) logger.info("""Self-training job completed: iteration: %d, stage: 2.""", A_ ) __magic_name__ = iteration __magic_name__ = data_dir_format(iteration + 1 ) __magic_name__ = AutoConfig.from_pretrained(os.path.join(A_, """best-checkpoint""" ) ) __magic_name__ = config.idalabel __magic_name__ = os.path.join(A_, """eval_results_best-checkpoint.json""" ) __magic_name__ = os.path.join(A_, """test_results_best-checkpoint.json""" ) assert os.path.exists(A_ ) with open(A_, """r""" ) as f: __magic_name__ = float(json.load(A_ )[args.eval_metric] ) __magic_name__ = os.path.join(A_, """infer_output_best-checkpoint.csv""" ) assert os.path.exists(A_ ) # Loading the dataset from local csv or json files. __magic_name__ = load_dataset(args.data_file_extension, data_files={"""data""": data_files["""infer"""]} )["""data"""] __magic_name__ = load_dataset("""csv""", data_files={"""data""": infer_output_file} )["""data"""] if accelerator.is_main_process: os.makedirs(A_, exist_ok=A_ ) shutil.copy(A_, os.path.join(A_, f'''eval_results_iter-{iteration}.json''' ) ) if os.path.exists(A_ ): shutil.copy(A_, os.path.join(A_, f'''test_results_iter-{iteration}.json''' ) ) create_pseudo_labeled_data(A_, A_, A_, A_, A_, A_ ) accelerator.wait_for_everyone() __magic_name__ = os.path.join(A_, f'''train_pseudo.{args.data_file_extension}''' ) if args.evaluation_strategy != IntervalStrategy.NO.value: __magic_name__ = eval_result if best_iteration is None: __magic_name__ = new_iteration __magic_name__ = new_eval_result else: if new_eval_result - best_eval_result > args.early_stopping_threshold: __magic_name__ = new_iteration __magic_name__ = new_eval_result __magic_name__ = 0 else: if new_eval_result == best_eval_result: __magic_name__ = new_iteration __magic_name__ = new_eval_result early_stopping_patience_counter += 1 if early_stopping_patience_counter >= args.early_stopping_patience: __magic_name__ = True progress_bar.update(1 ) if should_training_stop: break if best_iteration is not None: # Save the best iteration logger.info("""Best iteration: %d""", A_ ) logger.info("""Best evaluation result: %s = %f""", args.eval_metric, A_ ) accelerator.wait_for_everyone() if accelerator.is_main_process: shutil.copy( os.path.join(A_, f'''eval_results_iter-{iteration}.json''' ), os.path.join(A_, """eval_results_best-iteration.json""" ), ) else: # Assume that the last iteration is the best logger.info("""Best iteration: %d""", args.max_selftrain_iterations - 1 ) logger.info("""Best evaluation result: %s = %f""", args.eval_metric, A_ ) accelerator.wait_for_everyone() if accelerator.is_main_process: shutil.copy( os.path.join(A_, f'''eval_results_iter-{args.max_selftrain_iterations - 1}.json''' ), os.path.join(A_, """eval_results_best-iteration.json""" ), )
714
from ...configuration_utils import PretrainedConfig from ...utils import logging __lowerCAmelCase : Tuple = logging.get_logger(__name__) __lowerCAmelCase : Tuple = { 'SCUT-DLVCLab/lilt-roberta-en-base': ( 'https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json' ), } class UpperCAmelCase_ ( _A ): '''simple docstring''' a__ = """lilt""" def __init__( self : Dict , UpperCamelCase__ : List[str]=3_0522 , UpperCamelCase__ : Optional[Any]=768 , UpperCamelCase__ : Dict=12 , UpperCamelCase__ : Optional[Any]=12 , UpperCamelCase__ : Dict=3072 , UpperCamelCase__ : Tuple="gelu" , UpperCamelCase__ : int=0.1 , UpperCamelCase__ : Dict=0.1 , UpperCamelCase__ : Union[str, Any]=512 , UpperCamelCase__ : Dict=2 , UpperCamelCase__ : Dict=0.02 , UpperCamelCase__ : Any=1E-12 , UpperCamelCase__ : Optional[int]=0 , UpperCamelCase__ : str="absolute" , UpperCamelCase__ : Any=None , UpperCamelCase__ : Dict=4 , UpperCamelCase__ : Tuple=1024 , **UpperCamelCase__ : Optional[int] , ) -> Dict: """simple docstring""" super().__init__(pad_token_id=UpperCamelCase__ , **UpperCamelCase__ ) __magic_name__ = vocab_size __magic_name__ = hidden_size __magic_name__ = num_hidden_layers __magic_name__ = num_attention_heads __magic_name__ = hidden_act __magic_name__ = intermediate_size __magic_name__ = hidden_dropout_prob __magic_name__ = attention_probs_dropout_prob __magic_name__ = max_position_embeddings __magic_name__ = type_vocab_size __magic_name__ = initializer_range __magic_name__ = layer_norm_eps __magic_name__ = position_embedding_type __magic_name__ = classifier_dropout __magic_name__ = channel_shrink_ratio __magic_name__ = max_ad_position_embeddings
76
0
from ...configuration_utils import PretrainedConfig __lowerCAmelCase : List[str] = { 'google/tapas-base-finetuned-sqa': ( 'https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json' ), 'google/tapas-base-finetuned-wtq': ( 'https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json' ), 'google/tapas-base-finetuned-wikisql-supervised': ( 'https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json' ), 'google/tapas-base-finetuned-tabfact': ( 'https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json' ), } class UpperCAmelCase_ ( _A ): '''simple docstring''' a__ = """tapas""" def __init__( self : Optional[Any] , UpperCamelCase__ : Union[str, Any]=3_0522 , UpperCamelCase__ : List[str]=768 , UpperCamelCase__ : Dict=12 , UpperCamelCase__ : Dict=12 , UpperCamelCase__ : Any=3072 , UpperCamelCase__ : Optional[Any]="gelu" , UpperCamelCase__ : List[str]=0.1 , UpperCamelCase__ : Union[str, Any]=0.1 , UpperCamelCase__ : Tuple=1024 , UpperCamelCase__ : Union[str, Any]=[3, 256, 256, 2, 256, 256, 10] , UpperCamelCase__ : Optional[Any]=0.02 , UpperCamelCase__ : int=1E-12 , UpperCamelCase__ : Optional[int]=0 , UpperCamelCase__ : int=10.0 , UpperCamelCase__ : Union[str, Any]=0 , UpperCamelCase__ : Tuple=1.0 , UpperCamelCase__ : List[str]=None , UpperCamelCase__ : int=1.0 , UpperCamelCase__ : Any=False , UpperCamelCase__ : str=None , UpperCamelCase__ : Optional[int]=1.0 , UpperCamelCase__ : int=1.0 , UpperCamelCase__ : List[str]=False , UpperCamelCase__ : Union[str, Any]=False , UpperCamelCase__ : List[Any]="ratio" , UpperCamelCase__ : Optional[Any]=None , UpperCamelCase__ : Optional[Any]=None , UpperCamelCase__ : str=64 , UpperCamelCase__ : Any=32 , UpperCamelCase__ : List[Any]=False , UpperCamelCase__ : int=True , UpperCamelCase__ : List[Any]=False , UpperCamelCase__ : Union[str, Any]=False , UpperCamelCase__ : Optional[int]=True , UpperCamelCase__ : Tuple=False , UpperCamelCase__ : str=None , UpperCamelCase__ : List[Any]=None , **UpperCamelCase__ : Dict , ) -> Tuple: """simple docstring""" super().__init__(pad_token_id=UpperCamelCase__ , **UpperCamelCase__ ) # BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes) __magic_name__ = vocab_size __magic_name__ = hidden_size __magic_name__ = num_hidden_layers __magic_name__ = num_attention_heads __magic_name__ = hidden_act __magic_name__ = intermediate_size __magic_name__ = hidden_dropout_prob __magic_name__ = attention_probs_dropout_prob __magic_name__ = max_position_embeddings __magic_name__ = type_vocab_sizes __magic_name__ = initializer_range __magic_name__ = layer_norm_eps # Fine-tuning task hyperparameters __magic_name__ = positive_label_weight __magic_name__ = num_aggregation_labels __magic_name__ = aggregation_loss_weight __magic_name__ = use_answer_as_supervision __magic_name__ = answer_loss_importance __magic_name__ = use_normalized_answer_loss __magic_name__ = huber_loss_delta __magic_name__ = temperature __magic_name__ = aggregation_temperature __magic_name__ = use_gumbel_for_cells __magic_name__ = use_gumbel_for_aggregation __magic_name__ = average_approximation_function __magic_name__ = cell_selection_preference __magic_name__ = answer_loss_cutoff __magic_name__ = max_num_rows __magic_name__ = max_num_columns __magic_name__ = average_logits_per_cell __magic_name__ = select_one_column __magic_name__ = allow_empty_column_selection __magic_name__ = init_cell_selection_weights_to_zero __magic_name__ = reset_position_index_per_cell __magic_name__ = disable_per_token_loss # Aggregation hyperparameters __magic_name__ = aggregation_labels __magic_name__ = no_aggregation_label_index if isinstance(self.aggregation_labels , UpperCamelCase__ ): __magic_name__ = {int(UpperCamelCase__ ): v for k, v in aggregation_labels.items()}
715
import json import os import tempfile from transformers.testing_utils import check_json_file_has_correct_format class UpperCAmelCase_ : '''simple docstring''' a__ = None def _lowercase ( self : Optional[int] ) -> str: """simple docstring""" __magic_name__ = self.feature_extraction_class(**self.feat_extract_dict ) __magic_name__ = json.loads(feat_extract.to_json_string() ) for key, value in self.feat_extract_dict.items(): self.assertEqual(obj[key] , UpperCamelCase__ ) def _lowercase ( self : Union[str, Any] ) -> str: """simple docstring""" __magic_name__ = self.feature_extraction_class(**self.feat_extract_dict ) with tempfile.TemporaryDirectory() as tmpdirname: __magic_name__ = os.path.join(UpperCamelCase__ , """feat_extract.json""" ) feat_extract_first.to_json_file(UpperCamelCase__ ) __magic_name__ = self.feature_extraction_class.from_json_file(UpperCamelCase__ ) self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() ) def _lowercase ( self : str ) -> str: """simple docstring""" __magic_name__ = self.feature_extraction_class(**self.feat_extract_dict ) with tempfile.TemporaryDirectory() as tmpdirname: __magic_name__ = feat_extract_first.save_pretrained(UpperCamelCase__ )[0] check_json_file_has_correct_format(UpperCamelCase__ ) __magic_name__ = self.feature_extraction_class.from_pretrained(UpperCamelCase__ ) self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() ) def _lowercase ( self : Optional[int] ) -> Tuple: """simple docstring""" __magic_name__ = self.feature_extraction_class() self.assertIsNotNone(UpperCamelCase__ )
76
0
import argparse import json from pathlib import Path import torch import torchaudio from datasets import load_dataset from huggingface_hub import hf_hub_download from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification from transformers.utils import logging logging.set_verbosity_info() __lowerCAmelCase : str = logging.get_logger(__name__) def a__ ( A_ ): '''simple docstring''' __magic_name__ = ASTConfig() if "10-10" in model_name: pass elif "speech-commands" in model_name: __magic_name__ = 128 elif "12-12" in model_name: __magic_name__ = 12 __magic_name__ = 12 elif "14-14" in model_name: __magic_name__ = 14 __magic_name__ = 14 elif "16-16" in model_name: __magic_name__ = 16 __magic_name__ = 16 else: raise ValueError("""Model not supported""" ) __magic_name__ = """huggingface/label-files""" if "speech-commands" in model_name: __magic_name__ = 35 __magic_name__ = """speech-commands-v2-id2label.json""" else: __magic_name__ = 527 __magic_name__ = """audioset-id2label.json""" __magic_name__ = json.load(open(hf_hub_download(A_, A_, repo_type="""dataset""" ), """r""" ) ) __magic_name__ = {int(A_ ): v for k, v in idalabel.items()} __magic_name__ = idalabel __magic_name__ = {v: k for k, v in idalabel.items()} return config def a__ ( A_ ): '''simple docstring''' if "module.v" in name: __magic_name__ = name.replace("""module.v""", """audio_spectrogram_transformer""" ) if "cls_token" in name: __magic_name__ = name.replace("""cls_token""", """embeddings.cls_token""" ) if "dist_token" in name: __magic_name__ = name.replace("""dist_token""", """embeddings.distillation_token""" ) if "pos_embed" in name: __magic_name__ = name.replace("""pos_embed""", """embeddings.position_embeddings""" ) if "patch_embed.proj" in name: __magic_name__ = name.replace("""patch_embed.proj""", """embeddings.patch_embeddings.projection""" ) # transformer blocks if "blocks" in name: __magic_name__ = name.replace("""blocks""", """encoder.layer""" ) if "attn.proj" in name: __magic_name__ = name.replace("""attn.proj""", """attention.output.dense""" ) if "attn" in name: __magic_name__ = name.replace("""attn""", """attention.self""" ) if "norm1" in name: __magic_name__ = name.replace("""norm1""", """layernorm_before""" ) if "norm2" in name: __magic_name__ = name.replace("""norm2""", """layernorm_after""" ) if "mlp.fc1" in name: __magic_name__ = name.replace("""mlp.fc1""", """intermediate.dense""" ) if "mlp.fc2" in name: __magic_name__ = name.replace("""mlp.fc2""", """output.dense""" ) # final layernorm if "audio_spectrogram_transformer.norm" in name: __magic_name__ = name.replace("""audio_spectrogram_transformer.norm""", """audio_spectrogram_transformer.layernorm""" ) # classifier head if "module.mlp_head.0" in name: __magic_name__ = name.replace("""module.mlp_head.0""", """classifier.layernorm""" ) if "module.mlp_head.1" in name: __magic_name__ = name.replace("""module.mlp_head.1""", """classifier.dense""" ) return name def a__ ( A_, A_ ): '''simple docstring''' for key in orig_state_dict.copy().keys(): __magic_name__ = orig_state_dict.pop(A_ ) if "qkv" in key: __magic_name__ = key.split(""".""" ) __magic_name__ = int(key_split[3] ) __magic_name__ = config.hidden_size if "weight" in key: __magic_name__ = val[:dim, :] __magic_name__ = val[dim : dim * 2, :] __magic_name__ = val[-dim:, :] else: __magic_name__ = val[:dim] __magic_name__ = val[dim : dim * 2] __magic_name__ = val[-dim:] else: __magic_name__ = val return orig_state_dict def a__ ( A_ ): '''simple docstring''' __magic_name__ = [ """module.v.head.weight""", """module.v.head.bias""", """module.v.head_dist.weight""", """module.v.head_dist.bias""", ] for k in ignore_keys: state_dict.pop(A_, A_ ) @torch.no_grad() def a__ ( A_, A_, A_=False ): '''simple docstring''' __magic_name__ = get_audio_spectrogram_transformer_config(A_ ) __magic_name__ = { """ast-finetuned-audioset-10-10-0.4593""": ( """https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1""" ), """ast-finetuned-audioset-10-10-0.450""": ( """https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1""" ), """ast-finetuned-audioset-10-10-0.448""": ( """https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1""" ), """ast-finetuned-audioset-10-10-0.448-v2""": ( """https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1""" ), """ast-finetuned-audioset-12-12-0.447""": ( """https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1""" ), """ast-finetuned-audioset-14-14-0.443""": ( """https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1""" ), """ast-finetuned-audioset-16-16-0.442""": ( """https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1""" ), """ast-finetuned-speech-commands-v2""": ( """https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1""" ), } # load original state_dict __magic_name__ = model_name_to_url[model_name] __magic_name__ = torch.hub.load_state_dict_from_url(A_, map_location="""cpu""" ) # remove some keys remove_keys(A_ ) # rename some keys __magic_name__ = convert_state_dict(A_, A_ ) # load 🤗 model __magic_name__ = ASTForAudioClassification(A_ ) model.eval() model.load_state_dict(A_ ) # verify outputs on dummy input # source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62 __magic_name__ = -4.2677393 if """speech-commands""" not in model_name else -6.845978 __magic_name__ = 4.5689974 if """speech-commands""" not in model_name else 5.5654526 __magic_name__ = 1024 if """speech-commands""" not in model_name else 128 __magic_name__ = ASTFeatureExtractor(mean=A_, std=A_, max_length=A_ ) if "speech-commands" in model_name: __magic_name__ = load_dataset("""speech_commands""", """v0.02""", split="""validation""" ) __magic_name__ = dataset[0]["""audio"""]["""array"""] else: __magic_name__ = hf_hub_download( repo_id="""nielsr/audio-spectogram-transformer-checkpoint""", filename="""sample_audio.flac""", repo_type="""dataset""", ) __magic_name__ , __magic_name__ = torchaudio.load(A_ ) __magic_name__ = waveform.squeeze().numpy() __magic_name__ = feature_extractor(A_, sampling_rate=16000, return_tensors="""pt""" ) # forward pass __magic_name__ = model(**A_ ) __magic_name__ = outputs.logits if model_name == "ast-finetuned-audioset-10-10-0.4593": __magic_name__ = torch.tensor([-0.8760, -7.0042, -8.6602] ) elif model_name == "ast-finetuned-audioset-10-10-0.450": __magic_name__ = torch.tensor([-1.1986, -7.0903, -8.2718] ) elif model_name == "ast-finetuned-audioset-10-10-0.448": __magic_name__ = torch.tensor([-2.6128, -8.0080, -9.4344] ) elif model_name == "ast-finetuned-audioset-10-10-0.448-v2": __magic_name__ = torch.tensor([-1.5080, -7.4534, -8.8917] ) elif model_name == "ast-finetuned-audioset-12-12-0.447": __magic_name__ = torch.tensor([-0.5050, -6.5833, -8.0843] ) elif model_name == "ast-finetuned-audioset-14-14-0.443": __magic_name__ = torch.tensor([-0.3826, -7.0336, -8.2413] ) elif model_name == "ast-finetuned-audioset-16-16-0.442": __magic_name__ = torch.tensor([-1.2113, -6.9101, -8.3470] ) elif model_name == "ast-finetuned-speech-commands-v2": __magic_name__ = torch.tensor([6.1589, -8.0566, -8.7984] ) else: raise ValueError("""Unknown model name""" ) if not torch.allclose(logits[0, :3], A_, atol=1e-4 ): raise ValueError("""Logits don't match""" ) print("""Looks ok!""" ) if pytorch_dump_folder_path is not None: Path(A_ ).mkdir(exist_ok=A_ ) print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(A_ ) print(f'''Saving feature extractor to {pytorch_dump_folder_path}''' ) feature_extractor.save_pretrained(A_ ) if push_to_hub: print("""Pushing model and feature extractor to the hub...""" ) model.push_to_hub(f'''MIT/{model_name}''' ) feature_extractor.push_to_hub(f'''MIT/{model_name}''' ) if __name__ == "__main__": __lowerCAmelCase : List[str] = argparse.ArgumentParser() # Required parameters parser.add_argument( '--model_name', default='ast-finetuned-audioset-10-10-0.4593', type=str, help='Name of the Audio Spectrogram Transformer model you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.' ) parser.add_argument( '--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.' ) __lowerCAmelCase : Any = parser.parse_args() convert_audio_spectrogram_transformer_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
716
from ..utils import DummyObject, requires_backends class UpperCAmelCase_ ( metaclass=_A ): '''simple docstring''' a__ = ["""note_seq"""] def __init__( self : Any , *UpperCamelCase__ : str , **UpperCamelCase__ : List[Any] ) -> Optional[int]: """simple docstring""" requires_backends(self , ["""note_seq"""] ) @classmethod def _lowercase ( cls : str , *UpperCamelCase__ : Dict , **UpperCamelCase__ : Tuple ) -> Dict: """simple docstring""" requires_backends(cls , ["""note_seq"""] ) @classmethod def _lowercase ( cls : List[str] , *UpperCamelCase__ : Optional[Any] , **UpperCamelCase__ : Tuple ) -> int: """simple docstring""" requires_backends(cls , ["""note_seq"""] )
76
0
from typing import TYPE_CHECKING from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available __lowerCAmelCase : Optional[Any] = {'configuration_mmbt': ['MMBTConfig']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCAmelCase : Dict = ['MMBTForClassification', 'MMBTModel', 'ModalEmbeddings'] if TYPE_CHECKING: from .configuration_mmbt import MMBTConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings else: import sys __lowerCAmelCase : str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
717
def a__ ( A_ ): '''simple docstring''' return " ".join( """""".join(word[::-1] ) if len(A_ ) > 4 else word for word in sentence.split() ) if __name__ == "__main__": import doctest doctest.testmod() print(reverse_long_words('Hey wollef sroirraw'))
76
0
from ...processing_utils import ProcessorMixin class UpperCAmelCase_ ( _A ): '''simple docstring''' a__ = """WhisperFeatureExtractor""" a__ = """WhisperTokenizer""" def __init__( self : Any , UpperCamelCase__ : Dict , UpperCamelCase__ : Optional[Any] ) -> List[Any]: """simple docstring""" super().__init__(UpperCamelCase__ , UpperCamelCase__ ) __magic_name__ = self.feature_extractor __magic_name__ = False def _lowercase ( self : Dict , UpperCamelCase__ : Dict=None , UpperCamelCase__ : Dict=None , UpperCamelCase__ : Any=True ) -> Tuple: """simple docstring""" return self.tokenizer.get_decoder_prompt_ids(task=UpperCamelCase__ , language=UpperCamelCase__ , no_timestamps=UpperCamelCase__ ) def __call__( self : str , *UpperCamelCase__ : Dict , **UpperCamelCase__ : List[str] ) -> Dict: """simple docstring""" if self._in_target_context_manager: return self.current_processor(*UpperCamelCase__ , **UpperCamelCase__ ) __magic_name__ = kwargs.pop("""audio""" , UpperCamelCase__ ) __magic_name__ = kwargs.pop("""sampling_rate""" , UpperCamelCase__ ) __magic_name__ = kwargs.pop("""text""" , UpperCamelCase__ ) if len(UpperCamelCase__ ) > 0: __magic_name__ = args[0] __magic_name__ = args[1:] if audio is None and text is None: raise ValueError("""You need to specify either an `audio` or `text` input to process.""" ) if audio is not None: __magic_name__ = self.feature_extractor(UpperCamelCase__ , *UpperCamelCase__ , sampling_rate=UpperCamelCase__ , **UpperCamelCase__ ) if text is not None: __magic_name__ = self.tokenizer(UpperCamelCase__ , **UpperCamelCase__ ) if text is None: return inputs elif audio is None: return encodings else: __magic_name__ = encodings["""input_ids"""] return inputs def _lowercase ( self : Optional[int] , *UpperCamelCase__ : str , **UpperCamelCase__ : Tuple ) -> List[Any]: """simple docstring""" return self.tokenizer.batch_decode(*UpperCamelCase__ , **UpperCamelCase__ ) def _lowercase ( self : int , *UpperCamelCase__ : Dict , **UpperCamelCase__ : str ) -> Optional[int]: """simple docstring""" return self.tokenizer.decode(*UpperCamelCase__ , **UpperCamelCase__ ) def _lowercase ( self : Dict , UpperCamelCase__ : str , UpperCamelCase__ : List[str]="np" ) -> List[str]: """simple docstring""" return self.tokenizer.get_prompt_ids(UpperCamelCase__ , return_tensors=UpperCamelCase__ )
718
import os import unittest from transformers import FunnelTokenizer, FunnelTokenizerFast from transformers.models.funnel.tokenization_funnel import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class UpperCAmelCase_ ( _A , unittest.TestCase ): '''simple docstring''' a__ = FunnelTokenizer a__ = FunnelTokenizerFast a__ = True a__ = True def _lowercase ( self : List[Any] ) -> str: """simple docstring""" super().setUp() __magic_name__ = [ """<unk>""", """<cls>""", """<sep>""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing""", """,""", """low""", """lowest""", ] __magic_name__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] ) with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer: vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) ) def _lowercase ( self : Dict , **UpperCamelCase__ : Tuple ) -> Union[str, Any]: """simple docstring""" return FunnelTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase__ ) def _lowercase ( self : str , **UpperCamelCase__ : str ) -> List[str]: """simple docstring""" return FunnelTokenizerFast.from_pretrained(self.tmpdirname , **UpperCamelCase__ ) def _lowercase ( self : List[str] , UpperCamelCase__ : str ) -> List[Any]: """simple docstring""" __magic_name__ = """UNwant\u00E9d,running""" __magic_name__ = """unwanted, running""" return input_text, output_text def _lowercase ( self : Union[str, Any] ) -> Tuple: """simple docstring""" __magic_name__ = self.tokenizer_class(self.vocab_file ) __magic_name__ = tokenizer.tokenize("""UNwant\u00E9d,running""" ) self.assertListEqual(UpperCamelCase__ , ["""un""", """##want""", """##ed""", """,""", """runn""", """##ing"""] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase__ ) , [7, 4, 5, 10, 8, 9] ) def _lowercase ( self : str ) -> List[Any]: """simple docstring""" __magic_name__ = self.get_tokenizers(do_lower_case=UpperCamelCase__ ) for tokenizer in tokenizers: __magic_name__ = tokenizer("""UNwant\u00E9d,running""" ) __magic_name__ = len(inputs["""input_ids"""] ) - 1 self.assertListEqual(inputs["""token_type_ids"""] , [2] + [0] * sentence_len ) __magic_name__ = tokenizer("""UNwant\u00E9d,running""" , """UNwant\u00E9d,running""" ) self.assertListEqual(inputs["""token_type_ids"""] , [2] + [0] * sentence_len + [1] * sentence_len )
76
0
def a__ ( A_, A_ ): '''simple docstring''' if digit_amount > 0: return round(number - int(A_ ), A_ ) return number - int(A_ ) if __name__ == "__main__": print(decimal_isolate(1.53, 0)) print(decimal_isolate(35.345, 1)) print(decimal_isolate(35.345, 2)) print(decimal_isolate(35.345, 3)) print(decimal_isolate(-14.789, 3)) print(decimal_isolate(0, 2)) print(decimal_isolate(-14.123, 1)) print(decimal_isolate(-14.123, 2)) print(decimal_isolate(-14.123, 3))
719
from collections import deque from .hash_table import HashTable class UpperCAmelCase_ ( _A ): '''simple docstring''' def __init__( self : int , *UpperCamelCase__ : Union[str, Any] , **UpperCamelCase__ : Optional[Any] ) -> Optional[Any]: """simple docstring""" super().__init__(*UpperCamelCase__ , **UpperCamelCase__ ) def _lowercase ( self : Optional[Any] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Any ) -> Dict: """simple docstring""" __magic_name__ = deque([] ) if self.values[key] is None else self.values[key] self.values[key].appendleft(UpperCamelCase__ ) __magic_name__ = self.values[key] def _lowercase ( self : List[str] ) -> int: """simple docstring""" return ( sum(self.charge_factor - len(UpperCamelCase__ ) for slot in self.values ) / self.size_table * self.charge_factor ) def _lowercase ( self : Optional[Any] , UpperCamelCase__ : str , UpperCamelCase__ : Tuple=None ) -> str: """simple docstring""" if not ( len(self.values[key] ) == self.charge_factor and self.values.count(UpperCamelCase__ ) == 0 ): return key return super()._collision_resolution(UpperCamelCase__ , UpperCamelCase__ )
76
0
from typing import List, Optional, Union import torch from ...models import UNetaDConditionModel, VQModel from ...pipelines import DiffusionPipeline from ...pipelines.pipeline_utils import ImagePipelineOutput from ...schedulers import DDPMScheduler from ...utils import ( is_accelerate_available, is_accelerate_version, logging, randn_tensor, replace_example_docstring, ) __lowerCAmelCase : int = logging.get_logger(__name__) # pylint: disable=invalid-name __lowerCAmelCase : Union[str, Any] = '\n Examples:\n ```py\n >>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline\n >>> import torch\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-prior")\n >>> pipe_prior.to("cuda")\n >>> prompt = "red cat, 4k photo"\n >>> out = pipe_prior(prompt)\n >>> image_emb = out.image_embeds\n >>> zero_image_emb = out.negative_image_embeds\n >>> pipe = KandinskyV22Pipeline.from_pretrained("kandinsky-community/kandinsky-2-2-decoder")\n >>> pipe.to("cuda")\n >>> image = pipe(\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=50,\n ... ).images\n >>> image[0].save("cat.png")\n ```\n' def a__ ( A_, A_, A_=8 ): '''simple docstring''' __magic_name__ = height // scale_factor**2 if height % scale_factor**2 != 0: new_height += 1 __magic_name__ = width // scale_factor**2 if width % scale_factor**2 != 0: new_width += 1 return new_height * scale_factor, new_width * scale_factor class UpperCAmelCase_ ( _A ): '''simple docstring''' def __init__( self : Tuple , UpperCamelCase__ : UNetaDConditionModel , UpperCamelCase__ : DDPMScheduler , UpperCamelCase__ : VQModel , ) -> Union[str, Any]: """simple docstring""" super().__init__() self.register_modules( unet=UpperCamelCase__ , scheduler=UpperCamelCase__ , movq=UpperCamelCase__ , ) __magic_name__ = 2 ** (len(self.movq.config.block_out_channels ) - 1) def _lowercase ( self : Tuple , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Optional[Any] ) -> Tuple: """simple docstring""" if latents is None: __magic_name__ = randn_tensor(UpperCamelCase__ , generator=UpperCamelCase__ , device=UpperCamelCase__ , dtype=UpperCamelCase__ ) else: if latents.shape != shape: raise ValueError(F'''Unexpected latents shape, got {latents.shape}, expected {shape}''' ) __magic_name__ = latents.to(UpperCamelCase__ ) __magic_name__ = latents * scheduler.init_noise_sigma return latents def _lowercase ( self : int , UpperCamelCase__ : List[Any]=0 ) -> Union[str, Any]: """simple docstring""" if is_accelerate_available(): from accelerate import cpu_offload else: raise ImportError("""Please install accelerate via `pip install accelerate`""" ) __magic_name__ = torch.device(F'''cuda:{gpu_id}''' ) __magic_name__ = [ self.unet, self.movq, ] for cpu_offloaded_model in models: if cpu_offloaded_model is not None: cpu_offload(UpperCamelCase__ , UpperCamelCase__ ) def _lowercase ( self : List[Any] , UpperCamelCase__ : Dict=0 ) -> int: """simple docstring""" if is_accelerate_available() and is_accelerate_version(""">=""" , """0.17.0.dev0""" ): from accelerate import cpu_offload_with_hook else: raise ImportError("""`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.""" ) __magic_name__ = torch.device(F'''cuda:{gpu_id}''' ) if self.device.type != "cpu": self.to("""cpu""" , silence_dtype_warnings=UpperCamelCase__ ) torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist) __magic_name__ = None for cpu_offloaded_model in [self.unet, self.movq]: __magic_name__ , __magic_name__ = cpu_offload_with_hook(UpperCamelCase__ , UpperCamelCase__ , prev_module_hook=UpperCamelCase__ ) # We'll offload the last model manually. __magic_name__ = hook @property # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device def _lowercase ( self : int ) -> Tuple: """simple docstring""" if not hasattr(self.unet , """_hf_hook""" ): return self.device for module in self.unet.modules(): if ( hasattr(UpperCamelCase__ , """_hf_hook""" ) and hasattr(module._hf_hook , """execution_device""" ) and module._hf_hook.execution_device is not None ): return torch.device(module._hf_hook.execution_device ) return self.device @torch.no_grad() @replace_example_docstring(UpperCamelCase__ ) def __call__( self : Dict , UpperCamelCase__ : Union[torch.FloatTensor, List[torch.FloatTensor]] , UpperCamelCase__ : Union[torch.FloatTensor, List[torch.FloatTensor]] , UpperCamelCase__ : int = 512 , UpperCamelCase__ : int = 512 , UpperCamelCase__ : int = 100 , UpperCamelCase__ : float = 4.0 , UpperCamelCase__ : int = 1 , UpperCamelCase__ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , UpperCamelCase__ : Optional[torch.FloatTensor] = None , UpperCamelCase__ : Optional[str] = "pil" , UpperCamelCase__ : bool = True , ) -> int: """simple docstring""" __magic_name__ = self._execution_device __magic_name__ = guidance_scale > 1.0 if isinstance(UpperCamelCase__ , UpperCamelCase__ ): __magic_name__ = torch.cat(UpperCamelCase__ , dim=0 ) __magic_name__ = image_embeds.shape[0] * num_images_per_prompt if isinstance(UpperCamelCase__ , UpperCamelCase__ ): __magic_name__ = torch.cat(UpperCamelCase__ , dim=0 ) if do_classifier_free_guidance: __magic_name__ = image_embeds.repeat_interleave(UpperCamelCase__ , dim=0 ) __magic_name__ = negative_image_embeds.repeat_interleave(UpperCamelCase__ , dim=0 ) __magic_name__ = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=UpperCamelCase__ ) self.scheduler.set_timesteps(UpperCamelCase__ , device=UpperCamelCase__ ) __magic_name__ = self.scheduler.timesteps __magic_name__ = self.unet.config.in_channels __magic_name__ , __magic_name__ = downscale_height_and_width(UpperCamelCase__ , UpperCamelCase__ , self.movq_scale_factor ) # create initial latent __magic_name__ = self.prepare_latents( (batch_size, num_channels_latents, height, width) , image_embeds.dtype , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , self.scheduler , ) for i, t in enumerate(self.progress_bar(UpperCamelCase__ ) ): # expand the latents if we are doing classifier free guidance __magic_name__ = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents __magic_name__ = {"""image_embeds""": image_embeds} __magic_name__ = self.unet( sample=UpperCamelCase__ , timestep=UpperCamelCase__ , encoder_hidden_states=UpperCamelCase__ , added_cond_kwargs=UpperCamelCase__ , return_dict=UpperCamelCase__ , )[0] if do_classifier_free_guidance: __magic_name__ , __magic_name__ = noise_pred.split(latents.shape[1] , dim=1 ) __magic_name__ , __magic_name__ = noise_pred.chunk(2 ) __magic_name__ , __magic_name__ = variance_pred.chunk(2 ) __magic_name__ = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) __magic_name__ = torch.cat([noise_pred, variance_pred_text] , dim=1 ) if not ( hasattr(self.scheduler.config , """variance_type""" ) and self.scheduler.config.variance_type in ["learned", "learned_range"] ): __magic_name__ , __magic_name__ = noise_pred.split(latents.shape[1] , dim=1 ) # compute the previous noisy sample x_t -> x_t-1 __magic_name__ = self.scheduler.step( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , generator=UpperCamelCase__ , )[0] # post-processing __magic_name__ = self.movq.decode(UpperCamelCase__ , force_not_quantize=UpperCamelCase__ )["""sample"""] if output_type not in ["pt", "np", "pil"]: raise ValueError(F'''Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}''' ) if output_type in ["np", "pil"]: __magic_name__ = image * 0.5 + 0.5 __magic_name__ = image.clamp(0 , 1 ) __magic_name__ = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() if output_type == "pil": __magic_name__ = self.numpy_to_pil(UpperCamelCase__ ) if not return_dict: return (image,) return ImagePipelineOutput(images=UpperCamelCase__ )
720
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import os from ...utils.constants import SAGEMAKER_PARALLEL_EC2_INSTANCES, TORCH_DYNAMO_MODES from ...utils.dataclasses import ComputeEnvironment, SageMakerDistributedType from ...utils.imports import is_botoa_available from .config_args import SageMakerConfig from .config_utils import ( DYNAMO_BACKENDS, _ask_field, _ask_options, _convert_dynamo_backend, _convert_mixed_precision, _convert_sagemaker_distributed_mode, _convert_yes_no_to_bool, ) if is_botoa_available(): import botoa # noqa: F401 def a__ ( A_ ): '''simple docstring''' __magic_name__ = botoa.client("""iam""" ) __magic_name__ = { """Version""": """2012-10-17""", """Statement""": [ {"""Effect""": """Allow""", """Principal""": {"""Service""": """sagemaker.amazonaws.com"""}, """Action""": """sts:AssumeRole"""} ], } try: # create the role, associated with the chosen trust policy iam_client.create_role( RoleName=A_, AssumeRolePolicyDocument=json.dumps(A_, indent=2 ) ) __magic_name__ = { """Version""": """2012-10-17""", """Statement""": [ { """Effect""": """Allow""", """Action""": [ """sagemaker:*""", """ecr:GetDownloadUrlForLayer""", """ecr:BatchGetImage""", """ecr:BatchCheckLayerAvailability""", """ecr:GetAuthorizationToken""", """cloudwatch:PutMetricData""", """cloudwatch:GetMetricData""", """cloudwatch:GetMetricStatistics""", """cloudwatch:ListMetrics""", """logs:CreateLogGroup""", """logs:CreateLogStream""", """logs:DescribeLogStreams""", """logs:PutLogEvents""", """logs:GetLogEvents""", """s3:CreateBucket""", """s3:ListBucket""", """s3:GetBucketLocation""", """s3:GetObject""", """s3:PutObject""", ], """Resource""": """*""", } ], } # attach policy to role iam_client.put_role_policy( RoleName=A_, PolicyName=f'''{role_name}_policy_permission''', PolicyDocument=json.dumps(A_, indent=2 ), ) except iam_client.exceptions.EntityAlreadyExistsException: print(f'''role {role_name} already exists. Using existing one''' ) def a__ ( A_ ): '''simple docstring''' __magic_name__ = botoa.client("""iam""" ) return iam_client.get_role(RoleName=A_ )["Role"]["Arn"] def a__ ( ): '''simple docstring''' __magic_name__ = _ask_options( """How do you want to authorize?""", ["""AWS Profile""", """Credentials (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY) """], A_, ) __magic_name__ = None if credentials_configuration == 0: __magic_name__ = _ask_field("""Enter your AWS Profile name: [default] """, default="""default""" ) __magic_name__ = aws_profile else: print( """Note you will need to provide AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY when you launch you training script with,""" """`accelerate launch --aws_access_key_id XXX --aws_secret_access_key YYY`""" ) __magic_name__ = _ask_field("""AWS Access Key ID: """ ) __magic_name__ = aws_access_key_id __magic_name__ = _ask_field("""AWS Secret Access Key: """ ) __magic_name__ = aws_secret_access_key __magic_name__ = _ask_field("""Enter your AWS Region: [us-east-1]""", default="""us-east-1""" ) __magic_name__ = aws_region __magic_name__ = _ask_options( """Do you already have an IAM Role for executing Amazon SageMaker Training Jobs?""", ["""Provide IAM Role name""", """Create new IAM role using credentials"""], A_, ) if role_management == 0: __magic_name__ = _ask_field("""Enter your IAM role name: """ ) else: __magic_name__ = """accelerate_sagemaker_execution_role""" print(f'''Accelerate will create an iam role "{iam_role_name}" using the provided credentials''' ) _create_iam_role_for_sagemaker(A_ ) __magic_name__ = _ask_field( """Do you want to use custom Docker image? [yes/NO]: """, _convert_yes_no_to_bool, default=A_, error_message="""Please enter yes or no.""", ) __magic_name__ = None if is_custom_docker_image: __magic_name__ = _ask_field("""Enter your Docker image: """, lambda A_ : str(A_ ).lower() ) __magic_name__ = _ask_field( """Do you want to provide SageMaker input channels with data locations? [yes/NO]: """, _convert_yes_no_to_bool, default=A_, error_message="""Please enter yes or no.""", ) __magic_name__ = None if is_sagemaker_inputs_enabled: __magic_name__ = _ask_field( """Enter the path to the SageMaker inputs TSV file with columns (channel_name, data_location): """, lambda A_ : str(A_ ).lower(), ) __magic_name__ = _ask_field( """Do you want to enable SageMaker metrics? [yes/NO]: """, _convert_yes_no_to_bool, default=A_, error_message="""Please enter yes or no.""", ) __magic_name__ = None if is_sagemaker_metrics_enabled: __magic_name__ = _ask_field( """Enter the path to the SageMaker metrics TSV file with columns (metric_name, metric_regex): """, lambda A_ : str(A_ ).lower(), ) __magic_name__ = _ask_options( """What is the distributed mode?""", ["""No distributed training""", """Data parallelism"""], _convert_sagemaker_distributed_mode, ) __magic_name__ = {} __magic_name__ = _ask_field( """Do you wish to optimize your script with torch dynamo?[yes/NO]:""", _convert_yes_no_to_bool, default=A_, error_message="""Please enter yes or no.""", ) if use_dynamo: __magic_name__ = """dynamo_""" __magic_name__ = _ask_options( """Which dynamo backend would you like to use?""", [x.lower() for x in DYNAMO_BACKENDS], _convert_dynamo_backend, default=2, ) __magic_name__ = _ask_field( """Do you want to customize the defaults sent to torch.compile? [yes/NO]: """, _convert_yes_no_to_bool, default=A_, error_message="""Please enter yes or no.""", ) if use_custom_options: __magic_name__ = _ask_options( """Which mode do you want to use?""", A_, lambda A_ : TORCH_DYNAMO_MODES[int(A_ )], default="""default""", ) __magic_name__ = _ask_field( """Do you want the fullgraph mode or it is ok to break model into several subgraphs? [yes/NO]: """, _convert_yes_no_to_bool, default=A_, error_message="""Please enter yes or no.""", ) __magic_name__ = _ask_field( """Do you want to enable dynamic shape tracing? [yes/NO]: """, _convert_yes_no_to_bool, default=A_, error_message="""Please enter yes or no.""", ) __magic_name__ = """Which EC2 instance type you want to use for your training?""" if distributed_type != SageMakerDistributedType.NO: __magic_name__ = _ask_options( A_, A_, lambda A_ : SAGEMAKER_PARALLEL_EC2_INSTANCES[int(A_ )] ) else: eca_instance_query += "? [ml.p3.2xlarge]:" __magic_name__ = _ask_field(A_, lambda A_ : str(A_ ).lower(), default="""ml.p3.2xlarge""" ) __magic_name__ = 1 if distributed_type in (SageMakerDistributedType.DATA_PARALLEL, SageMakerDistributedType.MODEL_PARALLEL): __magic_name__ = _ask_field( """How many machines do you want use? [1]: """, A_, default=1, ) __magic_name__ = _ask_options( """Do you wish to use FP16 or BF16 (mixed precision)?""", ["""no""", """fp16""", """bf16""", """fp8"""], _convert_mixed_precision, ) if use_dynamo and mixed_precision == "no": print( """Torch dynamo used without mixed precision requires TF32 to be efficient. Accelerate will enable it by default when launching your scripts.""" ) return SageMakerConfig( image_uri=A_, compute_environment=ComputeEnvironment.AMAZON_SAGEMAKER, distributed_type=A_, use_cpu=A_, dynamo_config=A_, eca_instance_type=A_, profile=A_, region=A_, iam_role_name=A_, mixed_precision=A_, num_machines=A_, sagemaker_inputs_file=A_, sagemaker_metrics_file=A_, )
76
0
from collections import deque def a__ ( A_ ): '''simple docstring''' __magic_name__ = len(A_ ) __magic_name__ = deque() __magic_name__ = [False for _ in range(A_ )] __magic_name__ = [-1 for _ in range(A_ )] __magic_name__ = index_of[:] def strong_connect(A_, A_, A_ ): __magic_name__ = index # the number when this node is seen __magic_name__ = index # lowest rank node reachable from here index += 1 stack.append(A_ ) __magic_name__ = True for w in g[v]: if index_of[w] == -1: __magic_name__ = strong_connect(A_, A_, A_ ) __magic_name__ = ( lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v] ) elif on_stack[w]: __magic_name__ = ( lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v] ) if lowlink_of[v] == index_of[v]: __magic_name__ = [] __magic_name__ = stack.pop() __magic_name__ = False component.append(A_ ) while w != v: __magic_name__ = stack.pop() __magic_name__ = False component.append(A_ ) components.append(A_ ) return index __magic_name__ = [] for v in range(A_ ): if index_of[v] == -1: strong_connect(A_, 0, A_ ) return components def a__ ( A_, A_ ): '''simple docstring''' __magic_name__ = [[] for _ in range(A_ )] for u, v in edges: g[u].append(A_ ) return g if __name__ == "__main__": # Test __lowerCAmelCase : Tuple = 7 __lowerCAmelCase : Union[str, Any] = [0, 0, 1, 2, 3, 3, 4, 4, 6] __lowerCAmelCase : Dict = [1, 3, 2, 0, 1, 4, 5, 6, 5] __lowerCAmelCase : Optional[int] = [(u, v) for u, v in zip(source, target)] __lowerCAmelCase : str = create_graph(n_vertices, edges) assert [[5], [6], [4], [3, 2, 1, 0]] == tarjan(g)
721
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, convert_to_rgb, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging __lowerCAmelCase : Dict = logging.get_logger(__name__) if is_vision_available(): import PIL class UpperCAmelCase_ ( _A ): '''simple docstring''' a__ = ["""pixel_values"""] def __init__( self : Optional[Any] , UpperCamelCase__ : bool = True , UpperCamelCase__ : Dict[str, int] = None , UpperCamelCase__ : PILImageResampling = PILImageResampling.BICUBIC , UpperCamelCase__ : bool = True , UpperCamelCase__ : Dict[str, int] = None , UpperCamelCase__ : bool = True , UpperCamelCase__ : Union[int, float] = 1 / 255 , UpperCamelCase__ : bool = True , UpperCamelCase__ : Optional[Union[float, List[float]]] = None , UpperCamelCase__ : Optional[Union[float, List[float]]] = None , UpperCamelCase__ : bool = True , **UpperCamelCase__ : int , ) -> None: """simple docstring""" super().__init__(**UpperCamelCase__ ) __magic_name__ = size if size is not None else {"""shortest_edge""": 224} __magic_name__ = get_size_dict(UpperCamelCase__ , default_to_square=UpperCamelCase__ ) __magic_name__ = crop_size if crop_size is not None else {"""height""": 224, """width""": 224} __magic_name__ = get_size_dict(UpperCamelCase__ , default_to_square=UpperCamelCase__ , param_name="""crop_size""" ) __magic_name__ = do_resize __magic_name__ = size __magic_name__ = resample __magic_name__ = do_center_crop __magic_name__ = crop_size __magic_name__ = do_rescale __magic_name__ = rescale_factor __magic_name__ = do_normalize __magic_name__ = image_mean if image_mean is not None else OPENAI_CLIP_MEAN __magic_name__ = image_std if image_std is not None else OPENAI_CLIP_STD __magic_name__ = do_convert_rgb def _lowercase ( self : Union[str, Any] , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : Dict[str, int] , UpperCamelCase__ : PILImageResampling = PILImageResampling.BICUBIC , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : Tuple , ) -> np.ndarray: """simple docstring""" __magic_name__ = get_size_dict(UpperCamelCase__ , default_to_square=UpperCamelCase__ ) if "shortest_edge" not in size: raise ValueError(F'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''' ) __magic_name__ = get_resize_output_image_size(UpperCamelCase__ , size=size["""shortest_edge"""] , default_to_square=UpperCamelCase__ ) return resize(UpperCamelCase__ , size=UpperCamelCase__ , resample=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__ ) def _lowercase ( self : Tuple , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : Dict[str, int] , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : Tuple , ) -> np.ndarray: """simple docstring""" __magic_name__ = get_size_dict(UpperCamelCase__ ) if "height" not in size or "width" not in size: raise ValueError(F'''The `size` parameter must contain the keys (height, width). Got {size.keys()}''' ) return center_crop(UpperCamelCase__ , size=(size["""height"""], size["""width"""]) , data_format=UpperCamelCase__ , **UpperCamelCase__ ) def _lowercase ( self : Any , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : Union[int, float] , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : Optional[Any] , ) -> Optional[int]: """simple docstring""" return rescale(UpperCamelCase__ , scale=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__ ) def _lowercase ( self : Union[str, Any] , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : Union[float, List[float]] , UpperCamelCase__ : Union[float, List[float]] , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : Dict , ) -> np.ndarray: """simple docstring""" return normalize(UpperCamelCase__ , mean=UpperCamelCase__ , std=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__ ) def _lowercase ( self : List[Any] , UpperCamelCase__ : ImageInput , UpperCamelCase__ : bool = None , UpperCamelCase__ : Dict[str, int] = None , UpperCamelCase__ : PILImageResampling = None , UpperCamelCase__ : bool = None , UpperCamelCase__ : int = None , UpperCamelCase__ : bool = None , UpperCamelCase__ : float = None , UpperCamelCase__ : bool = None , UpperCamelCase__ : Optional[Union[float, List[float]]] = None , UpperCamelCase__ : Optional[Union[float, List[float]]] = None , UpperCamelCase__ : bool = None , UpperCamelCase__ : Optional[Union[str, TensorType]] = None , UpperCamelCase__ : Optional[ChannelDimension] = ChannelDimension.FIRST , **UpperCamelCase__ : Dict , ) -> PIL.Image.Image: """simple docstring""" __magic_name__ = do_resize if do_resize is not None else self.do_resize __magic_name__ = size if size is not None else self.size __magic_name__ = get_size_dict(UpperCamelCase__ , param_name="""size""" , default_to_square=UpperCamelCase__ ) __magic_name__ = resample if resample is not None else self.resample __magic_name__ = do_center_crop if do_center_crop is not None else self.do_center_crop __magic_name__ = crop_size if crop_size is not None else self.crop_size __magic_name__ = get_size_dict(UpperCamelCase__ , param_name="""crop_size""" , default_to_square=UpperCamelCase__ ) __magic_name__ = do_rescale if do_rescale is not None else self.do_rescale __magic_name__ = rescale_factor if rescale_factor is not None else self.rescale_factor __magic_name__ = do_normalize if do_normalize is not None else self.do_normalize __magic_name__ = image_mean if image_mean is not None else self.image_mean __magic_name__ = image_std if image_std is not None else self.image_std __magic_name__ = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb __magic_name__ = make_list_of_images(UpperCamelCase__ ) if not valid_images(UpperCamelCase__ ): raise ValueError( """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """ """torch.Tensor, tf.Tensor or jax.ndarray.""" ) if do_resize and size is None: raise ValueError("""Size must be specified if do_resize is True.""" ) if do_center_crop and crop_size is None: raise ValueError("""Crop size must be specified if do_center_crop is True.""" ) if do_rescale and rescale_factor is None: raise ValueError("""Rescale factor must be specified if do_rescale is True.""" ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("""Image mean and std must be specified if do_normalize is True.""" ) # PIL RGBA images are converted to RGB if do_convert_rgb: __magic_name__ = [convert_to_rgb(UpperCamelCase__ ) for image in images] # All transformations expect numpy arrays. __magic_name__ = [to_numpy_array(UpperCamelCase__ ) for image in images] if do_resize: __magic_name__ = [self.resize(image=UpperCamelCase__ , size=UpperCamelCase__ , resample=UpperCamelCase__ ) for image in images] if do_center_crop: __magic_name__ = [self.center_crop(image=UpperCamelCase__ , size=UpperCamelCase__ ) for image in images] if do_rescale: __magic_name__ = [self.rescale(image=UpperCamelCase__ , scale=UpperCamelCase__ ) for image in images] if do_normalize: __magic_name__ = [self.normalize(image=UpperCamelCase__ , mean=UpperCamelCase__ , std=UpperCamelCase__ ) for image in images] __magic_name__ = [to_channel_dimension_format(UpperCamelCase__ , UpperCamelCase__ ) for image in images] __magic_name__ = {"""pixel_values""": images} return BatchFeature(data=UpperCamelCase__ , tensor_type=UpperCamelCase__ )
76
0
def a__ ( A_, A_ ): '''simple docstring''' __magic_name__ = word.split() def justify(A_, A_, A_ ) -> str: __magic_name__ = max_width - width __magic_name__ = len(A_ ) if len(A_ ) == 1: # if there is only word in line # just insert overall_spaces_count for the remainder of line return line[0] + " " * overall_spaces_count else: __magic_name__ = words_count - 1 # num_spaces_between_words_list[i] : tells you to insert # num_spaces_between_words_list[i] spaces # after word on line[i] __magic_name__ = spaces_to_insert_between_words * [ overall_spaces_count // spaces_to_insert_between_words ] __magic_name__ = ( overall_spaces_count % spaces_to_insert_between_words ) # distribute spaces via round robin to the left words for i in range(A_ ): num_spaces_between_words_list[i] += 1 __magic_name__ = [] for i in range(A_ ): # add the word aligned_words_list.append(line[i] ) # add the spaces to insert aligned_words_list.append(num_spaces_between_words_list[i] * """ """ ) # just add the last word to the sentence aligned_words_list.append(line[-1] ) # join the aligned words list to form a justified line return "".join(A_ ) __magic_name__ = [] __magic_name__ = [] __magic_name__ = 0 for word in words: if width + len(A_ ) + len(A_ ) <= max_width: # keep adding words until we can fill out max_width # width = sum of length of all words (without overall_spaces_count) # len(word) = length of current word # len(line) = number of overall_spaces_count to insert between words line.append(A_ ) width += len(A_ ) else: # justify the line and add it to result answer.append(justify(A_, A_, A_ ) ) # reset new line and new width __magic_name__ , __magic_name__ = [word], len(A_ ) __magic_name__ = max_width - width - len(A_ ) answer.append(""" """.join(A_ ) + (remaining_spaces + 1) * """ """ ) return answer if __name__ == "__main__": from doctest import testmod testmod()
700
import unittest from transformers import AutoTokenizer, NystromformerConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( NystromformerForMaskedLM, NystromformerForMultipleChoice, NystromformerForQuestionAnswering, NystromformerForSequenceClassification, NystromformerForTokenClassification, NystromformerModel, ) from transformers.models.nystromformer.modeling_nystromformer import NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST class UpperCAmelCase_ : '''simple docstring''' def __init__( self : Any , UpperCamelCase__ : Any , UpperCamelCase__ : Tuple=13 , UpperCamelCase__ : Dict=7 , UpperCamelCase__ : Dict=True , UpperCamelCase__ : List[str]=True , UpperCamelCase__ : Optional[int]=True , UpperCamelCase__ : int=True , UpperCamelCase__ : Optional[int]=99 , UpperCamelCase__ : List[Any]=32 , UpperCamelCase__ : Any=5 , UpperCamelCase__ : List[Any]=4 , UpperCamelCase__ : str=37 , UpperCamelCase__ : Any="gelu" , UpperCamelCase__ : int=0.1 , UpperCamelCase__ : Tuple=0.1 , UpperCamelCase__ : Dict=512 , UpperCamelCase__ : Union[str, Any]=16 , UpperCamelCase__ : Dict=2 , UpperCamelCase__ : Any=0.02 , UpperCamelCase__ : Any=3 , UpperCamelCase__ : Dict=4 , UpperCamelCase__ : List[Any]=None , ) -> Union[str, Any]: """simple docstring""" __magic_name__ = parent __magic_name__ = batch_size __magic_name__ = seq_length __magic_name__ = is_training __magic_name__ = use_input_mask __magic_name__ = use_token_type_ids __magic_name__ = use_labels __magic_name__ = vocab_size __magic_name__ = hidden_size __magic_name__ = num_hidden_layers __magic_name__ = num_attention_heads __magic_name__ = intermediate_size __magic_name__ = hidden_act __magic_name__ = hidden_dropout_prob __magic_name__ = attention_probs_dropout_prob __magic_name__ = max_position_embeddings __magic_name__ = type_vocab_size __magic_name__ = type_sequence_label_size __magic_name__ = initializer_range __magic_name__ = num_labels __magic_name__ = num_choices __magic_name__ = scope def _lowercase ( self : Any ) -> Any: """simple docstring""" __magic_name__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __magic_name__ = None if self.use_input_mask: __magic_name__ = random_attention_mask([self.batch_size, self.seq_length] ) __magic_name__ = None if self.use_token_type_ids: __magic_name__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __magic_name__ = None __magic_name__ = None __magic_name__ = None if self.use_labels: __magic_name__ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __magic_name__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __magic_name__ = ids_tensor([self.batch_size] , self.num_choices ) __magic_name__ = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def _lowercase ( self : Tuple ) -> Any: """simple docstring""" return NystromformerConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCamelCase__ , initializer_range=self.initializer_range , ) def _lowercase ( self : Any , UpperCamelCase__ : Tuple , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : str , UpperCamelCase__ : int , UpperCamelCase__ : str , UpperCamelCase__ : Dict , UpperCamelCase__ : str ) -> Tuple: """simple docstring""" __magic_name__ = NystromformerModel(config=UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() __magic_name__ = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ ) __magic_name__ = model(UpperCamelCase__ , token_type_ids=UpperCamelCase__ ) __magic_name__ = model(UpperCamelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _lowercase ( self : Optional[int] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : str , UpperCamelCase__ : List[str] , UpperCamelCase__ : List[str] ) -> str: """simple docstring""" __magic_name__ = NystromformerForMaskedLM(config=UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() __magic_name__ = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , labels=UpperCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def _lowercase ( self : Tuple , UpperCamelCase__ : Any , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Dict , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Any , UpperCamelCase__ : Any ) -> Optional[Any]: """simple docstring""" __magic_name__ = NystromformerForQuestionAnswering(config=UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() __magic_name__ = model( UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , start_positions=UpperCamelCase__ , end_positions=UpperCamelCase__ , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def _lowercase ( self : Optional[Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : List[str] , UpperCamelCase__ : List[str] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Dict , UpperCamelCase__ : Dict , UpperCamelCase__ : Any ) -> Optional[int]: """simple docstring""" __magic_name__ = self.num_labels __magic_name__ = NystromformerForSequenceClassification(UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() __magic_name__ = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , labels=UpperCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def _lowercase ( self : Any , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : str , UpperCamelCase__ : Any ) -> Dict: """simple docstring""" __magic_name__ = self.num_labels __magic_name__ = NystromformerForTokenClassification(config=UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() __magic_name__ = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , labels=UpperCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def _lowercase ( self : Union[str, Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Tuple , UpperCamelCase__ : int , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : int , UpperCamelCase__ : int ) -> Optional[Any]: """simple docstring""" __magic_name__ = self.num_choices __magic_name__ = NystromformerForMultipleChoice(config=UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() __magic_name__ = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __magic_name__ = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __magic_name__ = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __magic_name__ = model( UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , labels=UpperCamelCase__ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def _lowercase ( self : int ) -> List[Any]: """simple docstring""" __magic_name__ = self.prepare_config_and_inputs() ( ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ) = config_and_inputs __magic_name__ = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask} return config, inputs_dict @require_torch class UpperCAmelCase_ ( _A , _A , unittest.TestCase ): '''simple docstring''' a__ = ( ( NystromformerModel, NystromformerForMaskedLM, NystromformerForMultipleChoice, NystromformerForQuestionAnswering, NystromformerForSequenceClassification, NystromformerForTokenClassification, ) if is_torch_available() else () ) a__ = ( { """feature-extraction""": NystromformerModel, """fill-mask""": NystromformerForMaskedLM, """question-answering""": NystromformerForQuestionAnswering, """text-classification""": NystromformerForSequenceClassification, """token-classification""": NystromformerForTokenClassification, """zero-shot""": NystromformerForSequenceClassification, } if is_torch_available() else {} ) a__ = False a__ = False def _lowercase ( self : Union[str, Any] ) -> Union[str, Any]: """simple docstring""" __magic_name__ = NystromformerModelTester(self ) __magic_name__ = ConfigTester(self , config_class=UpperCamelCase__ , hidden_size=37 ) def _lowercase ( self : Tuple ) -> Any: """simple docstring""" self.config_tester.run_common_tests() def _lowercase ( self : Optional[Any] ) -> Any: """simple docstring""" __magic_name__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCamelCase__ ) def _lowercase ( self : Optional[Any] ) -> int: """simple docstring""" __magic_name__ = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: __magic_name__ = type self.model_tester.create_and_check_model(*UpperCamelCase__ ) def _lowercase ( self : List[Any] ) -> List[Any]: """simple docstring""" __magic_name__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*UpperCamelCase__ ) def _lowercase ( self : Union[str, Any] ) -> str: """simple docstring""" __magic_name__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*UpperCamelCase__ ) def _lowercase ( self : Dict ) -> List[Any]: """simple docstring""" __magic_name__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*UpperCamelCase__ ) def _lowercase ( self : str ) -> int: """simple docstring""" __magic_name__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*UpperCamelCase__ ) def _lowercase ( self : List[Any] ) -> List[str]: """simple docstring""" __magic_name__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*UpperCamelCase__ ) @slow def _lowercase ( self : str ) -> Tuple: """simple docstring""" for model_name in NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __magic_name__ = NystromformerModel.from_pretrained(UpperCamelCase__ ) self.assertIsNotNone(UpperCamelCase__ ) @require_torch class UpperCAmelCase_ ( unittest.TestCase ): '''simple docstring''' @slow def _lowercase ( self : Union[str, Any] ) -> Optional[Any]: """simple docstring""" __magic_name__ = NystromformerModel.from_pretrained("""uw-madison/nystromformer-512""" ) __magic_name__ = torch.tensor([[0, 1, 2, 3, 4, 5]] ) with torch.no_grad(): __magic_name__ = model(UpperCamelCase__ )[0] __magic_name__ = torch.Size((1, 6, 768) ) self.assertEqual(output.shape , UpperCamelCase__ ) __magic_name__ = torch.tensor( [[[-0.4532, -0.0936, 0.5137], [-0.2676, 0.0628, 0.6186], [-0.3629, -0.1726, 0.4716]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , UpperCamelCase__ , atol=1E-4 ) ) @slow def _lowercase ( self : int ) -> str: """simple docstring""" __magic_name__ = """the [MASK] of Belgium is Brussels""" __magic_name__ = AutoTokenizer.from_pretrained("""uw-madison/nystromformer-512""" ) __magic_name__ = NystromformerForMaskedLM.from_pretrained("""uw-madison/nystromformer-512""" ) __magic_name__ = tokenizer(UpperCamelCase__ , return_tensors="""pt""" ) with torch.no_grad(): __magic_name__ = model(encoding.input_ids ).logits __magic_name__ = token_logits[:, 2, :].argmax(-1 )[0] self.assertEqual(tokenizer.decode(UpperCamelCase__ ) , """capital""" )
76
0
from typing import TYPE_CHECKING # rely on isort to merge the imports from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available __lowerCAmelCase : Optional[int] = { 'configuration_autoformer': [ 'AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'AutoformerConfig', ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCAmelCase : Tuple = [ 'AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST', 'AutoformerForPrediction', 'AutoformerModel', 'AutoformerPreTrainedModel', ] if TYPE_CHECKING: from .configuration_autoformer import ( AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, AutoformerConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_autoformer import ( AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, AutoformerForPrediction, AutoformerModel, AutoformerPreTrainedModel, ) else: import sys __lowerCAmelCase : List[str] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
701
from ...configuration_utils import PretrainedConfig from ...utils import logging __lowerCAmelCase : Tuple = logging.get_logger(__name__) __lowerCAmelCase : Union[str, Any] = { 'microsoft/cvt-13': 'https://huggingface.co/microsoft/cvt-13/resolve/main/config.json', # See all Cvt models at https://huggingface.co/models?filter=cvt } class UpperCAmelCase_ ( _A ): '''simple docstring''' a__ = """cvt""" def __init__( self : Dict , UpperCamelCase__ : Optional[int]=3 , UpperCamelCase__ : List[Any]=[7, 3, 3] , UpperCamelCase__ : Any=[4, 2, 2] , UpperCamelCase__ : Optional[Any]=[2, 1, 1] , UpperCamelCase__ : Union[str, Any]=[64, 192, 384] , UpperCamelCase__ : Dict=[1, 3, 6] , UpperCamelCase__ : Any=[1, 2, 10] , UpperCamelCase__ : List[str]=[4.0, 4.0, 4.0] , UpperCamelCase__ : Dict=[0.0, 0.0, 0.0] , UpperCamelCase__ : Tuple=[0.0, 0.0, 0.0] , UpperCamelCase__ : Optional[Any]=[0.0, 0.0, 0.1] , UpperCamelCase__ : str=[True, True, True] , UpperCamelCase__ : Optional[Any]=[False, False, True] , UpperCamelCase__ : Union[str, Any]=["dw_bn", "dw_bn", "dw_bn"] , UpperCamelCase__ : List[Any]=[3, 3, 3] , UpperCamelCase__ : Any=[1, 1, 1] , UpperCamelCase__ : Optional[int]=[2, 2, 2] , UpperCamelCase__ : Any=[1, 1, 1] , UpperCamelCase__ : List[str]=[1, 1, 1] , UpperCamelCase__ : int=0.02 , UpperCamelCase__ : int=1E-12 , **UpperCamelCase__ : int , ) -> Dict: """simple docstring""" super().__init__(**UpperCamelCase__ ) __magic_name__ = num_channels __magic_name__ = patch_sizes __magic_name__ = patch_stride __magic_name__ = patch_padding __magic_name__ = embed_dim __magic_name__ = num_heads __magic_name__ = depth __magic_name__ = mlp_ratio __magic_name__ = attention_drop_rate __magic_name__ = drop_rate __magic_name__ = drop_path_rate __magic_name__ = qkv_bias __magic_name__ = cls_token __magic_name__ = qkv_projection_method __magic_name__ = kernel_qkv __magic_name__ = padding_kv __magic_name__ = stride_kv __magic_name__ = padding_q __magic_name__ = stride_q __magic_name__ = initializer_range __magic_name__ = layer_norm_eps
76
0
__lowerCAmelCase : Any = {'a': ['c', 'b'], 'b': ['d', 'e'], 'c': [], 'd': [], 'e': []} __lowerCAmelCase : str = ['a', 'b', 'c', 'd', 'e'] def a__ ( A_, A_, A_ ): '''simple docstring''' __magic_name__ = start # add current to visited visited.append(A_ ) __magic_name__ = edges[current] for neighbor in neighbors: # if neighbor not in visited, visit if neighbor not in visited: __magic_name__ = topological_sort(A_, A_, A_ ) # if all neighbors visited add current to sort sort.append(A_ ) # if all vertices haven't been visited select a new one to visit if len(A_ ) != len(A_ ): for vertice in vertices: if vertice not in visited: __magic_name__ = topological_sort(A_, A_, A_ ) # return sort return sort if __name__ == "__main__": __lowerCAmelCase : List[Any] = topological_sort('a', [], []) print(sort)
702
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available __lowerCAmelCase : List[str] = { 'configuration_canine': ['CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'CanineConfig'], 'tokenization_canine': ['CanineTokenizer'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCAmelCase : Optional[Any] = [ 'CANINE_PRETRAINED_MODEL_ARCHIVE_LIST', 'CanineForMultipleChoice', 'CanineForQuestionAnswering', 'CanineForSequenceClassification', 'CanineForTokenClassification', 'CanineLayer', 'CanineModel', 'CaninePreTrainedModel', 'load_tf_weights_in_canine', ] if TYPE_CHECKING: from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig from .tokenization_canine import CanineTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_canine import ( CANINE_PRETRAINED_MODEL_ARCHIVE_LIST, CanineForMultipleChoice, CanineForQuestionAnswering, CanineForSequenceClassification, CanineForTokenClassification, CanineLayer, CanineModel, CaninePreTrainedModel, load_tf_weights_in_canine, ) else: import sys __lowerCAmelCase : int = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
76
0
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_torch_available, ) __lowerCAmelCase : List[str] = { 'configuration_encodec': [ 'ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP', 'EncodecConfig', ], 'feature_extraction_encodec': ['EncodecFeatureExtractor'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCAmelCase : List[Any] = [ 'ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST', 'EncodecModel', 'EncodecPreTrainedModel', ] if TYPE_CHECKING: from .configuration_encodec import ( ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP, EncodecConfig, ) from .feature_extraction_encodec import EncodecFeatureExtractor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_encodec import ( ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST, EncodecModel, EncodecPreTrainedModel, ) else: import sys __lowerCAmelCase : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
703
import argparse import torch from transformers import ( WavaVecaConfig, WavaVecaFeatureExtractor, WavaVecaForAudioFrameClassification, WavaVecaForSequenceClassification, WavaVecaForXVector, logging, ) logging.set_verbosity_info() __lowerCAmelCase : str = logging.get_logger(__name__) def a__ ( A_, A_, A_ ): '''simple docstring''' __magic_name__ = WavaVecaForSequenceClassification.from_pretrained(A_, config=A_ ) __magic_name__ = downstream_dict["""projector.weight"""] __magic_name__ = downstream_dict["""projector.bias"""] __magic_name__ = downstream_dict["""model.post_net.linear.weight"""] __magic_name__ = downstream_dict["""model.post_net.linear.bias"""] return model def a__ ( A_, A_, A_ ): '''simple docstring''' __magic_name__ = WavaVecaForAudioFrameClassification.from_pretrained(A_, config=A_ ) __magic_name__ = downstream_dict["""model.linear.weight"""] __magic_name__ = downstream_dict["""model.linear.bias"""] return model def a__ ( A_, A_, A_ ): '''simple docstring''' __magic_name__ = WavaVecaForXVector.from_pretrained(A_, config=A_ ) __magic_name__ = downstream_dict["""connector.weight"""] __magic_name__ = downstream_dict["""connector.bias"""] for i, kernel_size in enumerate(hf_config.tdnn_kernel ): __magic_name__ = downstream_dict[ f'''model.framelevel_feature_extractor.module.{i}.kernel.weight''' ] __magic_name__ = downstream_dict[f'''model.framelevel_feature_extractor.module.{i}.kernel.bias'''] __magic_name__ = downstream_dict["""model.utterancelevel_feature_extractor.linear1.weight"""] __magic_name__ = downstream_dict["""model.utterancelevel_feature_extractor.linear1.bias"""] __magic_name__ = downstream_dict["""model.utterancelevel_feature_extractor.linear2.weight"""] __magic_name__ = downstream_dict["""model.utterancelevel_feature_extractor.linear2.bias"""] __magic_name__ = downstream_dict["""objective.W"""] return model @torch.no_grad() def a__ ( A_, A_, A_, A_ ): '''simple docstring''' __magic_name__ = torch.load(A_, map_location="""cpu""" ) __magic_name__ = checkpoint["""Downstream"""] __magic_name__ = WavaVecaConfig.from_pretrained(A_ ) __magic_name__ = WavaVecaFeatureExtractor.from_pretrained( A_, return_attention_mask=A_, do_normalize=A_ ) __magic_name__ = hf_config.architectures[0] if arch.endswith("""ForSequenceClassification""" ): __magic_name__ = convert_classification(A_, A_, A_ ) elif arch.endswith("""ForAudioFrameClassification""" ): __magic_name__ = convert_diarization(A_, A_, A_ ) elif arch.endswith("""ForXVector""" ): __magic_name__ = convert_xvector(A_, A_, A_ ) else: raise NotImplementedError(f'''S3PRL weights conversion is not supported for {arch}''' ) if hf_config.use_weighted_layer_sum: __magic_name__ = checkpoint["""Featurizer"""]["""weights"""] hf_feature_extractor.save_pretrained(A_ ) hf_model.save_pretrained(A_ ) if __name__ == "__main__": __lowerCAmelCase : Optional[Any] = argparse.ArgumentParser() parser.add_argument( '--base_model_name', default=None, type=str, help='Name of the huggingface pretrained base model.' ) parser.add_argument('--config_path', default=None, type=str, help='Path to the huggingface classifier config.') parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to the s3prl checkpoint.') parser.add_argument('--model_dump_path', default=None, type=str, help='Path to the final converted model.') __lowerCAmelCase : str = parser.parse_args() convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
76
0
from __future__ import annotations from collections import Counter from random import random class UpperCAmelCase_ : '''simple docstring''' def __init__( self : Optional[int] ) -> Dict: """simple docstring""" __magic_name__ = {} def _lowercase ( self : Dict , UpperCamelCase__ : str ) -> None: """simple docstring""" __magic_name__ = {} def _lowercase ( self : Any , UpperCamelCase__ : str , UpperCamelCase__ : str , UpperCamelCase__ : float ) -> None: """simple docstring""" if nodea not in self.connections: self.add_node(UpperCamelCase__ ) if nodea not in self.connections: self.add_node(UpperCamelCase__ ) __magic_name__ = probability def _lowercase ( self : Optional[Any] ) -> list[str]: """simple docstring""" return list(self.connections ) def _lowercase ( self : Any , UpperCamelCase__ : str ) -> str: """simple docstring""" __magic_name__ = 0 __magic_name__ = random() for dest in self.connections[node]: current_probability += self.connections[node][dest] if current_probability > random_value: return dest return "" def a__ ( A_, A_, A_ ): '''simple docstring''' __magic_name__ = MarkovChainGraphUndirectedUnweighted() for nodea, nodea, probability in transitions: graph.add_transition_probability(A_, A_, A_ ) __magic_name__ = Counter(graph.get_nodes() ) __magic_name__ = start for _ in range(A_ ): __magic_name__ = graph.transition(A_ ) visited[node] += 1 return visited if __name__ == "__main__": import doctest doctest.testmod()
704
import pytest from datasets import Dataset, DatasetDict, Features, NamedSplit, Value from datasets.io.text import TextDatasetReader from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases def a__ ( A_, A_ ): '''simple docstring''' assert isinstance(A_, A_ ) assert dataset.num_rows == 4 assert dataset.num_columns == 1 assert dataset.column_names == ["text"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("""keep_in_memory""", [False, True] ) def a__ ( A_, A_, A_ ): '''simple docstring''' __magic_name__ = tmp_path / """cache""" __magic_name__ = {"""text""": """string"""} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): __magic_name__ = TextDatasetReader(A_, cache_dir=A_, keep_in_memory=A_ ).read() _check_text_dataset(A_, A_ ) @pytest.mark.parametrize( """features""", [ None, {"""text""": """string"""}, {"""text""": """int32"""}, {"""text""": """float32"""}, ], ) def a__ ( A_, A_, A_ ): '''simple docstring''' __magic_name__ = tmp_path / """cache""" __magic_name__ = {"""text""": """string"""} __magic_name__ = features.copy() if features else default_expected_features __magic_name__ = ( Features({feature: Value(A_ ) for feature, dtype in features.items()} ) if features is not None else None ) __magic_name__ = TextDatasetReader(A_, features=A_, cache_dir=A_ ).read() _check_text_dataset(A_, A_ ) @pytest.mark.parametrize("""split""", [None, NamedSplit("""train""" ), """train""", """test"""] ) def a__ ( A_, A_, A_ ): '''simple docstring''' __magic_name__ = tmp_path / """cache""" __magic_name__ = {"""text""": """string"""} __magic_name__ = TextDatasetReader(A_, cache_dir=A_, split=A_ ).read() _check_text_dataset(A_, A_ ) assert dataset.split == split if split else "train" @pytest.mark.parametrize("""path_type""", [str, list] ) def a__ ( A_, A_, A_ ): '''simple docstring''' if issubclass(A_, A_ ): __magic_name__ = text_path elif issubclass(A_, A_ ): __magic_name__ = [text_path] __magic_name__ = tmp_path / """cache""" __magic_name__ = {"""text""": """string"""} __magic_name__ = TextDatasetReader(A_, cache_dir=A_ ).read() _check_text_dataset(A_, A_ ) def a__ ( A_, A_, A_=("train",) ): '''simple docstring''' assert isinstance(A_, A_ ) for split in splits: __magic_name__ = dataset_dict[split] assert dataset.num_rows == 4 assert dataset.num_columns == 1 assert dataset.column_names == ["text"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("""keep_in_memory""", [False, True] ) def a__ ( A_, A_, A_ ): '''simple docstring''' __magic_name__ = tmp_path / """cache""" __magic_name__ = {"""text""": """string"""} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): __magic_name__ = TextDatasetReader({"""train""": text_path}, cache_dir=A_, keep_in_memory=A_ ).read() _check_text_datasetdict(A_, A_ ) @pytest.mark.parametrize( """features""", [ None, {"""text""": """string"""}, {"""text""": """int32"""}, {"""text""": """float32"""}, ], ) def a__ ( A_, A_, A_ ): '''simple docstring''' __magic_name__ = tmp_path / """cache""" # CSV file loses col_1 string dtype information: default now is "int64" instead of "string" __magic_name__ = {"""text""": """string"""} __magic_name__ = features.copy() if features else default_expected_features __magic_name__ = ( Features({feature: Value(A_ ) for feature, dtype in features.items()} ) if features is not None else None ) __magic_name__ = TextDatasetReader({"""train""": text_path}, features=A_, cache_dir=A_ ).read() _check_text_datasetdict(A_, A_ ) @pytest.mark.parametrize("""split""", [None, NamedSplit("""train""" ), """train""", """test"""] ) def a__ ( A_, A_, A_ ): '''simple docstring''' if split: __magic_name__ = {split: text_path} else: __magic_name__ = """train""" __magic_name__ = {"""train""": text_path, """test""": text_path} __magic_name__ = tmp_path / """cache""" __magic_name__ = {"""text""": """string"""} __magic_name__ = TextDatasetReader(A_, cache_dir=A_ ).read() _check_text_datasetdict(A_, A_, splits=list(path.keys() ) ) assert all(dataset[split].split == split for split in path.keys() )
76
0
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available, is_vision_available, ) __lowerCAmelCase : Dict = { 'configuration_convnext': ['CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ConvNextConfig', 'ConvNextOnnxConfig'] } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCAmelCase : List[Any] = ['ConvNextFeatureExtractor'] __lowerCAmelCase : str = ['ConvNextImageProcessor'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCAmelCase : Optional[Any] = [ 'CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST', 'ConvNextForImageClassification', 'ConvNextModel', 'ConvNextPreTrainedModel', 'ConvNextBackbone', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCAmelCase : List[str] = [ 'TFConvNextForImageClassification', 'TFConvNextModel', 'TFConvNextPreTrainedModel', ] if TYPE_CHECKING: from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_convnext import ConvNextFeatureExtractor from .image_processing_convnext import ConvNextImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_convnext import ( CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST, ConvNextBackbone, ConvNextForImageClassification, ConvNextModel, ConvNextPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel else: import sys __lowerCAmelCase : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure)
705
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, logging __lowerCAmelCase : Union[str, Any] = logging.get_logger(__name__) class UpperCAmelCase_ ( _A ): '''simple docstring''' a__ = ["""pixel_values"""] def __init__( self : str , UpperCamelCase__ : bool = True , UpperCamelCase__ : Optional[Dict[str, int]] = None , UpperCamelCase__ : PILImageResampling = PILImageResampling.BILINEAR , UpperCamelCase__ : bool = True , UpperCamelCase__ : Dict[str, int] = None , UpperCamelCase__ : bool = True , UpperCamelCase__ : Union[int, float] = 1 / 255 , UpperCamelCase__ : bool = True , UpperCamelCase__ : Optional[Union[float, List[float]]] = None , UpperCamelCase__ : Optional[Union[float, List[float]]] = None , **UpperCamelCase__ : List[Any] , ) -> None: """simple docstring""" super().__init__(**UpperCamelCase__ ) __magic_name__ = size if size is not None else {"""shortest_edge""": 256} __magic_name__ = get_size_dict(UpperCamelCase__ , default_to_square=UpperCamelCase__ ) __magic_name__ = crop_size if crop_size is not None else {"""height""": 224, """width""": 224} __magic_name__ = get_size_dict(UpperCamelCase__ ) __magic_name__ = do_resize __magic_name__ = size __magic_name__ = resample __magic_name__ = do_center_crop __magic_name__ = crop_size __magic_name__ = do_rescale __magic_name__ = rescale_factor __magic_name__ = do_normalize __magic_name__ = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN __magic_name__ = image_std if image_std is not None else IMAGENET_STANDARD_STD def _lowercase ( self : Any , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : Dict[str, int] , UpperCamelCase__ : PILImageResampling = PILImageResampling.BICUBIC , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : Tuple , ) -> np.ndarray: """simple docstring""" __magic_name__ = get_size_dict(UpperCamelCase__ , default_to_square=UpperCamelCase__ ) if "shortest_edge" not in size: raise ValueError(F'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''' ) __magic_name__ = get_resize_output_image_size(UpperCamelCase__ , size=size["""shortest_edge"""] , default_to_square=UpperCamelCase__ ) return resize(UpperCamelCase__ , size=UpperCamelCase__ , resample=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__ ) def _lowercase ( self : str , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : Dict[str, int] , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : str , ) -> np.ndarray: """simple docstring""" __magic_name__ = get_size_dict(UpperCamelCase__ ) return center_crop(UpperCamelCase__ , size=(size["""height"""], size["""width"""]) , data_format=UpperCamelCase__ , **UpperCamelCase__ ) def _lowercase ( self : Tuple , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : float , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : Any ) -> np.ndarray: """simple docstring""" return rescale(UpperCamelCase__ , scale=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__ ) def _lowercase ( self : List[str] , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : Union[float, List[float]] , UpperCamelCase__ : Union[float, List[float]] , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : str , ) -> np.ndarray: """simple docstring""" return normalize(UpperCamelCase__ , mean=UpperCamelCase__ , std=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__ ) def _lowercase ( self : Optional[Any] , UpperCamelCase__ : ImageInput , UpperCamelCase__ : Optional[bool] = None , UpperCamelCase__ : Dict[str, int] = None , UpperCamelCase__ : PILImageResampling = None , UpperCamelCase__ : bool = None , UpperCamelCase__ : Dict[str, int] = None , UpperCamelCase__ : Optional[bool] = None , UpperCamelCase__ : Optional[float] = None , UpperCamelCase__ : Optional[bool] = None , UpperCamelCase__ : Optional[Union[float, List[float]]] = None , UpperCamelCase__ : Optional[Union[float, List[float]]] = None , UpperCamelCase__ : Optional[Union[str, TensorType]] = None , UpperCamelCase__ : Union[str, ChannelDimension] = ChannelDimension.FIRST , **UpperCamelCase__ : int , ) -> Dict: """simple docstring""" __magic_name__ = do_resize if do_resize is not None else self.do_resize __magic_name__ = size if size is not None else self.size __magic_name__ = get_size_dict(UpperCamelCase__ , default_to_square=UpperCamelCase__ ) __magic_name__ = resample if resample is not None else self.resample __magic_name__ = do_center_crop if do_center_crop is not None else self.do_center_crop __magic_name__ = crop_size if crop_size is not None else self.crop_size __magic_name__ = get_size_dict(UpperCamelCase__ ) __magic_name__ = do_rescale if do_rescale is not None else self.do_rescale __magic_name__ = rescale_factor if rescale_factor is not None else self.rescale_factor __magic_name__ = do_normalize if do_normalize is not None else self.do_normalize __magic_name__ = image_mean if image_mean is not None else self.image_mean __magic_name__ = image_std if image_std is not None else self.image_std __magic_name__ = make_list_of_images(UpperCamelCase__ ) if not valid_images(UpperCamelCase__ ): raise ValueError( """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """ """torch.Tensor, tf.Tensor or jax.ndarray.""" ) if do_resize and size is None: raise ValueError("""Size must be specified if do_resize is True.""" ) if do_center_crop and crop_size is None: raise ValueError("""Crop size must be specified if do_center_crop is True.""" ) if do_rescale and rescale_factor is None: raise ValueError("""Rescale factor must be specified if do_rescale is True.""" ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("""Image mean and std must be specified if do_normalize is True.""" ) # All transformations expect numpy arrays. __magic_name__ = [to_numpy_array(UpperCamelCase__ ) for image in images] if do_resize: __magic_name__ = [self.resize(image=UpperCamelCase__ , size=UpperCamelCase__ , resample=UpperCamelCase__ ) for image in images] if do_center_crop: __magic_name__ = [self.center_crop(image=UpperCamelCase__ , size=UpperCamelCase__ ) for image in images] if do_rescale: __magic_name__ = [self.rescale(image=UpperCamelCase__ , scale=UpperCamelCase__ ) for image in images] if do_normalize: __magic_name__ = [self.normalize(image=UpperCamelCase__ , mean=UpperCamelCase__ , std=UpperCamelCase__ ) for image in images] __magic_name__ = [to_channel_dimension_format(UpperCamelCase__ , UpperCamelCase__ ) for image in images] __magic_name__ = {"""pixel_values""": images} return BatchFeature(data=UpperCamelCase__ , tensor_type=UpperCamelCase__ )
76
0
import argparse import os import torch from diffusers import ( CMStochasticIterativeScheduler, ConsistencyModelPipeline, UNetaDModel, ) __lowerCAmelCase : Optional[Any] = { 'sample_size': 32, 'in_channels': 3, 'out_channels': 3, 'layers_per_block': 2, 'num_class_embeds': 1000, 'block_out_channels': [32, 64], 'attention_head_dim': 8, 'down_block_types': [ 'ResnetDownsampleBlock2D', 'AttnDownBlock2D', ], 'up_block_types': [ 'AttnUpBlock2D', 'ResnetUpsampleBlock2D', ], 'resnet_time_scale_shift': 'scale_shift', 'upsample_type': 'resnet', 'downsample_type': 'resnet', } __lowerCAmelCase : Optional[int] = { 'sample_size': 64, 'in_channels': 3, 'out_channels': 3, 'layers_per_block': 3, 'num_class_embeds': 1000, 'block_out_channels': [192, 192 * 2, 192 * 3, 192 * 4], 'attention_head_dim': 64, 'down_block_types': [ 'ResnetDownsampleBlock2D', 'AttnDownBlock2D', 'AttnDownBlock2D', 'AttnDownBlock2D', ], 'up_block_types': [ 'AttnUpBlock2D', 'AttnUpBlock2D', 'AttnUpBlock2D', 'ResnetUpsampleBlock2D', ], 'resnet_time_scale_shift': 'scale_shift', 'upsample_type': 'resnet', 'downsample_type': 'resnet', } __lowerCAmelCase : List[str] = { 'sample_size': 256, 'in_channels': 3, 'out_channels': 3, 'layers_per_block': 2, 'num_class_embeds': None, 'block_out_channels': [256, 256, 256 * 2, 256 * 2, 256 * 4, 256 * 4], 'attention_head_dim': 64, 'down_block_types': [ 'ResnetDownsampleBlock2D', 'ResnetDownsampleBlock2D', 'ResnetDownsampleBlock2D', 'AttnDownBlock2D', 'AttnDownBlock2D', 'AttnDownBlock2D', ], 'up_block_types': [ 'AttnUpBlock2D', 'AttnUpBlock2D', 'AttnUpBlock2D', 'ResnetUpsampleBlock2D', 'ResnetUpsampleBlock2D', 'ResnetUpsampleBlock2D', ], 'resnet_time_scale_shift': 'default', 'upsample_type': 'resnet', 'downsample_type': 'resnet', } __lowerCAmelCase : Union[str, Any] = { 'num_train_timesteps': 40, 'sigma_min': 0.002, 'sigma_max': 80.0, } __lowerCAmelCase : Optional[int] = { 'num_train_timesteps': 201, 'sigma_min': 0.002, 'sigma_max': 80.0, } __lowerCAmelCase : Any = { 'num_train_timesteps': 151, 'sigma_min': 0.002, 'sigma_max': 80.0, } def a__ ( A_ ): '''simple docstring''' if isinstance(A_, A_ ): return v if v.lower() in ("yes", "true", "t", "y", "1"): return True elif v.lower() in ("no", "false", "f", "n", "0"): return False else: raise argparse.ArgumentTypeError("""boolean value expected""" ) def a__ ( A_, A_, A_, A_, A_=False ): '''simple docstring''' __magic_name__ = checkpoint[f'''{old_prefix}.in_layers.0.weight'''] __magic_name__ = checkpoint[f'''{old_prefix}.in_layers.0.bias'''] __magic_name__ = checkpoint[f'''{old_prefix}.in_layers.2.weight'''] __magic_name__ = checkpoint[f'''{old_prefix}.in_layers.2.bias'''] __magic_name__ = checkpoint[f'''{old_prefix}.emb_layers.1.weight'''] __magic_name__ = checkpoint[f'''{old_prefix}.emb_layers.1.bias'''] __magic_name__ = checkpoint[f'''{old_prefix}.out_layers.0.weight'''] __magic_name__ = checkpoint[f'''{old_prefix}.out_layers.0.bias'''] __magic_name__ = checkpoint[f'''{old_prefix}.out_layers.3.weight'''] __magic_name__ = checkpoint[f'''{old_prefix}.out_layers.3.bias'''] if has_skip: __magic_name__ = checkpoint[f'''{old_prefix}.skip_connection.weight'''] __magic_name__ = checkpoint[f'''{old_prefix}.skip_connection.bias'''] return new_checkpoint def a__ ( A_, A_, A_, A_, A_=None ): '''simple docstring''' __magic_name__ , __magic_name__ , __magic_name__ = checkpoint[f'''{old_prefix}.qkv.weight'''].chunk(3, dim=0 ) __magic_name__ , __magic_name__ , __magic_name__ = checkpoint[f'''{old_prefix}.qkv.bias'''].chunk(3, dim=0 ) __magic_name__ = checkpoint[f'''{old_prefix}.norm.weight'''] __magic_name__ = checkpoint[f'''{old_prefix}.norm.bias'''] __magic_name__ = weight_q.squeeze(-1 ).squeeze(-1 ) __magic_name__ = bias_q.squeeze(-1 ).squeeze(-1 ) __magic_name__ = weight_k.squeeze(-1 ).squeeze(-1 ) __magic_name__ = bias_k.squeeze(-1 ).squeeze(-1 ) __magic_name__ = weight_v.squeeze(-1 ).squeeze(-1 ) __magic_name__ = bias_v.squeeze(-1 ).squeeze(-1 ) __magic_name__ = ( checkpoint[f'''{old_prefix}.proj_out.weight'''].squeeze(-1 ).squeeze(-1 ) ) __magic_name__ = checkpoint[f'''{old_prefix}.proj_out.bias'''].squeeze(-1 ).squeeze(-1 ) return new_checkpoint def a__ ( A_, A_ ): '''simple docstring''' __magic_name__ = torch.load(A_, map_location="""cpu""" ) __magic_name__ = {} __magic_name__ = checkpoint["""time_embed.0.weight"""] __magic_name__ = checkpoint["""time_embed.0.bias"""] __magic_name__ = checkpoint["""time_embed.2.weight"""] __magic_name__ = checkpoint["""time_embed.2.bias"""] if unet_config["num_class_embeds"] is not None: __magic_name__ = checkpoint["""label_emb.weight"""] __magic_name__ = checkpoint["""input_blocks.0.0.weight"""] __magic_name__ = checkpoint["""input_blocks.0.0.bias"""] __magic_name__ = unet_config["""down_block_types"""] __magic_name__ = unet_config["""layers_per_block"""] __magic_name__ = unet_config["""attention_head_dim"""] __magic_name__ = unet_config["""block_out_channels"""] __magic_name__ = 1 __magic_name__ = channels_list[0] for i, layer_type in enumerate(A_ ): __magic_name__ = channels_list[i] __magic_name__ = current_channels != prev_channels if layer_type == "ResnetDownsampleBlock2D": for j in range(A_ ): __magic_name__ = f'''down_blocks.{i}.resnets.{j}''' __magic_name__ = f'''input_blocks.{current_layer}.0''' __magic_name__ = True if j == 0 and downsample_block_has_skip else False __magic_name__ = convert_resnet(A_, A_, A_, A_, has_skip=A_ ) current_layer += 1 elif layer_type == "AttnDownBlock2D": for j in range(A_ ): __magic_name__ = f'''down_blocks.{i}.resnets.{j}''' __magic_name__ = f'''input_blocks.{current_layer}.0''' __magic_name__ = True if j == 0 and downsample_block_has_skip else False __magic_name__ = convert_resnet(A_, A_, A_, A_, has_skip=A_ ) __magic_name__ = f'''down_blocks.{i}.attentions.{j}''' __magic_name__ = f'''input_blocks.{current_layer}.1''' __magic_name__ = convert_attention( A_, A_, A_, A_, A_ ) current_layer += 1 if i != len(A_ ) - 1: __magic_name__ = f'''down_blocks.{i}.downsamplers.0''' __magic_name__ = f'''input_blocks.{current_layer}.0''' __magic_name__ = convert_resnet(A_, A_, A_, A_ ) current_layer += 1 __magic_name__ = current_channels # hardcoded the mid-block for now __magic_name__ = """mid_block.resnets.0""" __magic_name__ = """middle_block.0""" __magic_name__ = convert_resnet(A_, A_, A_, A_ ) __magic_name__ = """mid_block.attentions.0""" __magic_name__ = """middle_block.1""" __magic_name__ = convert_attention(A_, A_, A_, A_, A_ ) __magic_name__ = """mid_block.resnets.1""" __magic_name__ = """middle_block.2""" __magic_name__ = convert_resnet(A_, A_, A_, A_ ) __magic_name__ = 0 __magic_name__ = unet_config["""up_block_types"""] for i, layer_type in enumerate(A_ ): if layer_type == "ResnetUpsampleBlock2D": for j in range(layers_per_block + 1 ): __magic_name__ = f'''up_blocks.{i}.resnets.{j}''' __magic_name__ = f'''output_blocks.{current_layer}.0''' __magic_name__ = convert_resnet(A_, A_, A_, A_, has_skip=A_ ) current_layer += 1 if i != len(A_ ) - 1: __magic_name__ = f'''up_blocks.{i}.upsamplers.0''' __magic_name__ = f'''output_blocks.{current_layer-1}.1''' __magic_name__ = convert_resnet(A_, A_, A_, A_ ) elif layer_type == "AttnUpBlock2D": for j in range(layers_per_block + 1 ): __magic_name__ = f'''up_blocks.{i}.resnets.{j}''' __magic_name__ = f'''output_blocks.{current_layer}.0''' __magic_name__ = convert_resnet(A_, A_, A_, A_, has_skip=A_ ) __magic_name__ = f'''up_blocks.{i}.attentions.{j}''' __magic_name__ = f'''output_blocks.{current_layer}.1''' __magic_name__ = convert_attention( A_, A_, A_, A_, A_ ) current_layer += 1 if i != len(A_ ) - 1: __magic_name__ = f'''up_blocks.{i}.upsamplers.0''' __magic_name__ = f'''output_blocks.{current_layer-1}.2''' __magic_name__ = convert_resnet(A_, A_, A_, A_ ) __magic_name__ = checkpoint["""out.0.weight"""] __magic_name__ = checkpoint["""out.0.bias"""] __magic_name__ = checkpoint["""out.2.weight"""] __magic_name__ = checkpoint["""out.2.bias"""] return new_checkpoint if __name__ == "__main__": __lowerCAmelCase : Tuple = argparse.ArgumentParser() parser.add_argument('--unet_path', default=None, type=str, required=True, help='Path to the unet.pt to convert.') parser.add_argument( '--dump_path', default=None, type=str, required=True, help='Path to output the converted UNet model.' ) parser.add_argument('--class_cond', default=True, type=str, help='Whether the model is class-conditional.') __lowerCAmelCase : int = parser.parse_args() __lowerCAmelCase : int = strabool(args.class_cond) __lowerCAmelCase : Tuple = os.path.basename(args.unet_path) print(F'''Checkpoint: {ckpt_name}''') # Get U-Net config if "imagenet64" in ckpt_name: __lowerCAmelCase : int = IMAGENET_64_UNET_CONFIG elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)): __lowerCAmelCase : Any = LSUN_256_UNET_CONFIG elif "test" in ckpt_name: __lowerCAmelCase : int = TEST_UNET_CONFIG else: raise ValueError(F'''Checkpoint type {ckpt_name} is not currently supported.''') if not args.class_cond: __lowerCAmelCase : str = None __lowerCAmelCase : Union[str, Any] = con_pt_to_diffuser(args.unet_path, unet_config) __lowerCAmelCase : Optional[Any] = UNetaDModel(**unet_config) image_unet.load_state_dict(converted_unet_ckpt) # Get scheduler config if "cd" in ckpt_name or "test" in ckpt_name: __lowerCAmelCase : str = CD_SCHEDULER_CONFIG elif "ct" in ckpt_name and "imagenet64" in ckpt_name: __lowerCAmelCase : List[str] = CT_IMAGENET_64_SCHEDULER_CONFIG elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)): __lowerCAmelCase : Optional[int] = CT_LSUN_256_SCHEDULER_CONFIG else: raise ValueError(F'''Checkpoint type {ckpt_name} is not currently supported.''') __lowerCAmelCase : Union[str, Any] = CMStochasticIterativeScheduler(**scheduler_config) __lowerCAmelCase : Dict = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler) consistency_model.save_pretrained(args.dump_path)
706
import math def a__ ( A_, A_ = 0, A_ = 0 ): '''simple docstring''' __magic_name__ = end or len(A_ ) for i in range(A_, A_ ): __magic_name__ = i __magic_name__ = array[i] while temp_index != start and temp_index_value < array[temp_index - 1]: __magic_name__ = array[temp_index - 1] temp_index -= 1 __magic_name__ = temp_index_value return array def a__ ( A_, A_, A_ ): # Max Heap '''simple docstring''' __magic_name__ = index __magic_name__ = 2 * index + 1 # Left Node __magic_name__ = 2 * index + 2 # Right Node if left_index < heap_size and array[largest] < array[left_index]: __magic_name__ = left_index if right_index < heap_size and array[largest] < array[right_index]: __magic_name__ = right_index if largest != index: __magic_name__ , __magic_name__ = array[largest], array[index] heapify(A_, A_, A_ ) def a__ ( A_ ): '''simple docstring''' __magic_name__ = len(A_ ) for i in range(n // 2, -1, -1 ): heapify(A_, A_, A_ ) for i in range(n - 1, 0, -1 ): __magic_name__ , __magic_name__ = array[0], array[i] heapify(A_, 0, A_ ) return array def a__ ( A_, A_, A_, A_ ): '''simple docstring''' if (array[first_index] > array[middle_index]) != ( array[first_index] > array[last_index] ): return array[first_index] elif (array[middle_index] > array[first_index]) != ( array[middle_index] > array[last_index] ): return array[middle_index] else: return array[last_index] def a__ ( A_, A_, A_, A_ ): '''simple docstring''' __magic_name__ = low __magic_name__ = high while True: while array[i] < pivot: i += 1 j -= 1 while pivot < array[j]: j -= 1 if i >= j: return i __magic_name__ , __magic_name__ = array[j], array[i] i += 1 def a__ ( A_ ): '''simple docstring''' if len(A_ ) == 0: return array __magic_name__ = 2 * math.ceil(math.loga(len(A_ ) ) ) __magic_name__ = 16 return intro_sort(A_, 0, len(A_ ), A_, A_ ) def a__ ( A_, A_, A_, A_, A_ ): '''simple docstring''' while end - start > size_threshold: if max_depth == 0: return heap_sort(A_ ) max_depth -= 1 __magic_name__ = median_of_a(A_, A_, start + ((end - start) // 2) + 1, end - 1 ) __magic_name__ = partition(A_, A_, A_, A_ ) intro_sort(A_, A_, A_, A_, A_ ) __magic_name__ = p return insertion_sort(A_, A_, A_ ) if __name__ == "__main__": import doctest doctest.testmod() __lowerCAmelCase : str = input('Enter numbers separated by a comma : ').strip() __lowerCAmelCase : List[Any] = [float(item) for item in user_input.split(',')] print(sort(unsorted))
76
0
import os __lowerCAmelCase : Dict = {'I': 1, 'V': 5, 'X': 10, 'L': 50, 'C': 100, 'D': 500, 'M': 1000} def a__ ( A_ ): '''simple docstring''' __magic_name__ = 0 __magic_name__ = 0 while index < len(A_ ) - 1: __magic_name__ = SYMBOLS[numerals[index]] __magic_name__ = SYMBOLS[numerals[index + 1]] if current_value < next_value: total_value -= current_value else: total_value += current_value index += 1 total_value += SYMBOLS[numerals[index]] return total_value def a__ ( A_ ): '''simple docstring''' __magic_name__ = """""" __magic_name__ = num // 1000 numerals += m_count * "M" num %= 1000 __magic_name__ = num // 100 if c_count == 9: numerals += "CM" c_count -= 9 elif c_count == 4: numerals += "CD" c_count -= 4 if c_count >= 5: numerals += "D" c_count -= 5 numerals += c_count * "C" num %= 100 __magic_name__ = num // 10 if x_count == 9: numerals += "XC" x_count -= 9 elif x_count == 4: numerals += "XL" x_count -= 4 if x_count >= 5: numerals += "L" x_count -= 5 numerals += x_count * "X" num %= 10 if num == 9: numerals += "IX" num -= 9 elif num == 4: numerals += "IV" num -= 4 if num >= 5: numerals += "V" num -= 5 numerals += num * "I" return numerals def a__ ( A_ = "/p089_roman.txt" ): '''simple docstring''' __magic_name__ = 0 with open(os.path.dirname(A_ ) + roman_numerals_filename ) as filea: __magic_name__ = filea.readlines() for line in lines: __magic_name__ = line.strip() __magic_name__ = parse_roman_numerals(A_ ) __magic_name__ = generate_roman_numerals(A_ ) savings += len(A_ ) - len(A_ ) return savings if __name__ == "__main__": print(F'''{solution() = }''')
707
import argparse import json import re from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( MobileNetVaConfig, MobileNetVaForImageClassification, MobileNetVaImageProcessor, load_tf_weights_in_mobilenet_va, ) from transformers.utils import logging logging.set_verbosity_info() __lowerCAmelCase : str = logging.get_logger(__name__) def a__ ( A_ ): '''simple docstring''' __magic_name__ = MobileNetVaConfig(layer_norm_eps=0.001 ) if "_quant" in model_name: raise ValueError("""Quantized models are not supported.""" ) __magic_name__ = re.match(R"""^mobilenet_v1_([^_]*)_([^_]*)$""", A_ ) if matches: __magic_name__ = float(matches[1] ) __magic_name__ = int(matches[2] ) # The TensorFlow version of MobileNetV1 predicts 1001 classes instead of # the usual 1000. The first class (index 0) is "background". __magic_name__ = 1001 __magic_name__ = """imagenet-1k-id2label.json""" __magic_name__ = """huggingface/label-files""" __magic_name__ = json.load(open(hf_hub_download(A_, A_, repo_type="""dataset""" ), """r""" ) ) __magic_name__ = {int(A_ ) + 1: v for k, v in idalabel.items()} __magic_name__ = """background""" __magic_name__ = idalabel __magic_name__ = {v: k for k, v in idalabel.items()} return config def a__ ( ): '''simple docstring''' __magic_name__ = """http://images.cocodataset.org/val2017/000000039769.jpg""" __magic_name__ = Image.open(requests.get(A_, stream=A_ ).raw ) return im @torch.no_grad() def a__ ( A_, A_, A_, A_=False ): '''simple docstring''' __magic_name__ = get_mobilenet_va_config(A_ ) # Load 🤗 model __magic_name__ = MobileNetVaForImageClassification(A_ ).eval() # Load weights from TensorFlow checkpoint load_tf_weights_in_mobilenet_va(A_, A_, A_ ) # Check outputs on an image, prepared by MobileNetV1ImageProcessor __magic_name__ = MobileNetVaImageProcessor( crop_size={"""width""": config.image_size, """height""": config.image_size}, size={"""shortest_edge""": config.image_size + 32}, ) __magic_name__ = image_processor(images=prepare_img(), return_tensors="""pt""" ) __magic_name__ = model(**A_ ) __magic_name__ = outputs.logits assert logits.shape == (1, 1001) if model_name == "mobilenet_v1_1.0_224": __magic_name__ = torch.tensor([-4.1739, -1.1233, 3.1205] ) elif model_name == "mobilenet_v1_0.75_192": __magic_name__ = torch.tensor([-3.9440, -2.3141, -0.3333] ) else: __magic_name__ = None if expected_logits is not None: assert torch.allclose(logits[0, :3], A_, atol=1e-4 ) Path(A_ ).mkdir(exist_ok=A_ ) print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(A_ ) print(f'''Saving image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(A_ ) if push_to_hub: print("""Pushing to the hub...""" ) __magic_name__ = """google/""" + model_name image_processor.push_to_hub(A_ ) model.push_to_hub(A_ ) if __name__ == "__main__": __lowerCAmelCase : str = argparse.ArgumentParser() # Required parameters parser.add_argument( '--model_name', default='mobilenet_v1_1.0_224', type=str, help='Name of the MobileNetV1 model you\'d like to convert. Should in the form \'mobilenet_v1_<depth>_<size>\'.', ) parser.add_argument( '--checkpoint_path', required=True, type=str, help='Path to the original TensorFlow checkpoint (.ckpt file).' ) parser.add_argument( '--pytorch_dump_folder_path', required=True, type=str, help='Path to the output PyTorch model directory.' ) parser.add_argument( '--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.' ) __lowerCAmelCase : str = parser.parse_args() convert_movilevit_checkpoint( args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub )
76
0
import re import warnings from contextlib import contextmanager from ...processing_utils import ProcessorMixin class UpperCAmelCase_ ( _A ): '''simple docstring''' a__ = ["""image_processor""", """tokenizer"""] a__ = """AutoImageProcessor""" a__ = """AutoTokenizer""" def __init__( self : List[Any] , UpperCamelCase__ : Optional[int]=None , UpperCamelCase__ : Dict=None , **UpperCamelCase__ : Tuple ) -> Union[str, Any]: """simple docstring""" __magic_name__ = None if "feature_extractor" in kwargs: warnings.warn( """The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`""" """ instead.""" , UpperCamelCase__ , ) __magic_name__ = kwargs.pop("""feature_extractor""" ) __magic_name__ = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("""You need to specify an `image_processor`.""" ) if tokenizer is None: raise ValueError("""You need to specify a `tokenizer`.""" ) super().__init__(UpperCamelCase__ , UpperCamelCase__ ) __magic_name__ = self.image_processor __magic_name__ = False def __call__( self : List[Any] , *UpperCamelCase__ : Any , **UpperCamelCase__ : str ) -> Any: """simple docstring""" if self._in_target_context_manager: return self.current_processor(*UpperCamelCase__ , **UpperCamelCase__ ) __magic_name__ = kwargs.pop("""images""" , UpperCamelCase__ ) __magic_name__ = kwargs.pop("""text""" , UpperCamelCase__ ) if len(UpperCamelCase__ ) > 0: __magic_name__ = args[0] __magic_name__ = args[1:] if images is None and text is None: raise ValueError("""You need to specify either an `images` or `text` input to process.""" ) if images is not None: __magic_name__ = self.image_processor(UpperCamelCase__ , *UpperCamelCase__ , **UpperCamelCase__ ) if text is not None: __magic_name__ = self.tokenizer(UpperCamelCase__ , **UpperCamelCase__ ) if text is None: return inputs elif images is None: return encodings else: __magic_name__ = encodings["""input_ids"""] return inputs def _lowercase ( self : Tuple , *UpperCamelCase__ : Union[str, Any] , **UpperCamelCase__ : Any ) -> List[Any]: """simple docstring""" return self.tokenizer.batch_decode(*UpperCamelCase__ , **UpperCamelCase__ ) def _lowercase ( self : Optional[int] , *UpperCamelCase__ : Optional[Any] , **UpperCamelCase__ : List[Any] ) -> Any: """simple docstring""" return self.tokenizer.decode(*UpperCamelCase__ , **UpperCamelCase__ ) @contextmanager def _lowercase ( self : Tuple ) -> Dict: """simple docstring""" warnings.warn( """`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your """ """labels by using the argument `text` of the regular `__call__` method (either in the same call as """ """your images inputs, or in a separate call.""" ) __magic_name__ = True __magic_name__ = self.tokenizer yield __magic_name__ = self.image_processor __magic_name__ = False def _lowercase ( self : str , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Optional[Any]=False , UpperCamelCase__ : Any=None ) -> Optional[Any]: """simple docstring""" if added_vocab is None: __magic_name__ = self.tokenizer.get_added_vocab() __magic_name__ = {} while tokens: __magic_name__ = re.search(R"""<s_(.*?)>""" , UpperCamelCase__ , re.IGNORECASE ) if start_token is None: break __magic_name__ = start_token.group(1 ) __magic_name__ = re.search(RF'''</s_{key}>''' , UpperCamelCase__ , re.IGNORECASE ) __magic_name__ = start_token.group() if end_token is None: __magic_name__ = tokens.replace(UpperCamelCase__ , """""" ) else: __magic_name__ = end_token.group() __magic_name__ = re.escape(UpperCamelCase__ ) __magic_name__ = re.escape(UpperCamelCase__ ) __magic_name__ = re.search(F'''{start_token_escaped}(.*?){end_token_escaped}''' , UpperCamelCase__ , re.IGNORECASE ) if content is not None: __magic_name__ = content.group(1 ).strip() if r"<s_" in content and r"</s_" in content: # non-leaf node __magic_name__ = self.tokenajson(UpperCamelCase__ , is_inner_value=UpperCamelCase__ , added_vocab=UpperCamelCase__ ) if value: if len(UpperCamelCase__ ) == 1: __magic_name__ = value[0] __magic_name__ = value else: # leaf nodes __magic_name__ = [] for leaf in content.split(R"""<sep/>""" ): __magic_name__ = leaf.strip() if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>": __magic_name__ = leaf[1:-2] # for categorical special tokens output[key].append(UpperCamelCase__ ) if len(output[key] ) == 1: __magic_name__ = output[key][0] __magic_name__ = tokens[tokens.find(UpperCamelCase__ ) + len(UpperCamelCase__ ) :].strip() if tokens[:6] == r"<sep/>": # non-leaf nodes return [output] + self.tokenajson(tokens[6:] , is_inner_value=UpperCamelCase__ , added_vocab=UpperCamelCase__ ) if len(UpperCamelCase__ ): return [output] if is_inner_value else output else: return [] if is_inner_value else {"text_sequence": tokens} @property def _lowercase ( self : Union[str, Any] ) -> Tuple: """simple docstring""" warnings.warn( """`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , UpperCamelCase__ , ) return self.image_processor_class @property def _lowercase ( self : Union[str, Any] ) -> Tuple: """simple docstring""" warnings.warn( """`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" , UpperCamelCase__ , ) return self.image_processor
708
import collections import importlib.util import os import re from pathlib import Path __lowerCAmelCase : int = 'src/transformers' # Matches is_xxx_available() __lowerCAmelCase : Optional[int] = re.compile(R'is\_([a-z_]*)_available()') # Catches a one-line _import_struct = {xxx} __lowerCAmelCase : Dict = re.compile(R'^_import_structure\s+=\s+\{([^\}]+)\}') # Catches a line with a key-values pattern: "bla": ["foo", "bar"] __lowerCAmelCase : int = re.compile(R'\s+"\S*":\s+\[([^\]]*)\]') # Catches a line if not is_foo_available __lowerCAmelCase : Optional[Any] = re.compile(R'^\s*if\s+not\s+is\_[a-z_]*\_available\(\)') # Catches a line _import_struct["bla"].append("foo") __lowerCAmelCase : Optional[Any] = re.compile(R'^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)') # Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"] __lowerCAmelCase : Dict = re.compile(R'^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]') # Catches a line with an object between quotes and a comma: "MyModel", __lowerCAmelCase : List[str] = re.compile('^\s+"([^"]+)",') # Catches a line with objects between brackets only: ["foo", "bar"], __lowerCAmelCase : Optional[int] = re.compile('^\s+\[([^\]]+)\]') # Catches a line with from foo import bar, bla, boo __lowerCAmelCase : List[str] = re.compile(R'\s+from\s+\S*\s+import\s+([^\(\s].*)\n') # Catches a line with try: __lowerCAmelCase : int = re.compile(R'^\s*try:') # Catches a line with else: __lowerCAmelCase : Tuple = re.compile(R'^\s*else:') def a__ ( A_ ): '''simple docstring''' if _re_test_backend.search(A_ ) is None: return None __magic_name__ = [b[0] for b in _re_backend.findall(A_ )] backends.sort() return "_and_".join(A_ ) def a__ ( A_ ): '''simple docstring''' with open(A_, """r""", encoding="""utf-8""", newline="""\n""" ) as f: __magic_name__ = f.readlines() __magic_name__ = 0 while line_index < len(A_ ) and not lines[line_index].startswith("""_import_structure = {""" ): line_index += 1 # If this is a traditional init, just return. if line_index >= len(A_ ): return None # First grab the objects without a specific backend in _import_structure __magic_name__ = [] while not lines[line_index].startswith("""if TYPE_CHECKING""" ) and find_backend(lines[line_index] ) is None: __magic_name__ = lines[line_index] # If we have everything on a single line, let's deal with it. if _re_one_line_import_struct.search(A_ ): __magic_name__ = _re_one_line_import_struct.search(A_ ).groups()[0] __magic_name__ = re.findall("""\[([^\]]+)\]""", A_ ) for imp in imports: objects.extend([obj[1:-1] for obj in imp.split(""", """ )] ) line_index += 1 continue __magic_name__ = _re_import_struct_key_value.search(A_ ) if single_line_import_search is not None: __magic_name__ = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(""", """ ) if len(A_ ) > 0] objects.extend(A_ ) elif line.startswith(""" """ * 8 + """\"""" ): objects.append(line[9:-3] ) line_index += 1 __magic_name__ = {"""none""": objects} # Let's continue with backend-specific objects in _import_structure while not lines[line_index].startswith("""if TYPE_CHECKING""" ): # If the line is an if not is_backend_available, we grab all objects associated. __magic_name__ = find_backend(lines[line_index] ) # Check if the backend declaration is inside a try block: if _re_try.search(lines[line_index - 1] ) is None: __magic_name__ = None if backend is not None: line_index += 1 # Scroll until we hit the else block of try-except-else while _re_else.search(lines[line_index] ) is None: line_index += 1 line_index += 1 __magic_name__ = [] # Until we unindent, add backend objects to the list while len(lines[line_index] ) <= 1 or lines[line_index].startswith(""" """ * 4 ): __magic_name__ = lines[line_index] if _re_import_struct_add_one.search(A_ ) is not None: objects.append(_re_import_struct_add_one.search(A_ ).groups()[0] ) elif _re_import_struct_add_many.search(A_ ) is not None: __magic_name__ = _re_import_struct_add_many.search(A_ ).groups()[0].split(""", """ ) __magic_name__ = [obj[1:-1] for obj in imports if len(A_ ) > 0] objects.extend(A_ ) elif _re_between_brackets.search(A_ ) is not None: __magic_name__ = _re_between_brackets.search(A_ ).groups()[0].split(""", """ ) __magic_name__ = [obj[1:-1] for obj in imports if len(A_ ) > 0] objects.extend(A_ ) elif _re_quote_object.search(A_ ) is not None: objects.append(_re_quote_object.search(A_ ).groups()[0] ) elif line.startswith(""" """ * 8 + """\"""" ): objects.append(line[9:-3] ) elif line.startswith(""" """ * 12 + """\"""" ): objects.append(line[13:-3] ) line_index += 1 __magic_name__ = objects else: line_index += 1 # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend __magic_name__ = [] while ( line_index < len(A_ ) and find_backend(lines[line_index] ) is None and not lines[line_index].startswith("""else""" ) ): __magic_name__ = lines[line_index] __magic_name__ = _re_import.search(A_ ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(""", """ ) ) elif line.startswith(""" """ * 8 ): objects.append(line[8:-2] ) line_index += 1 __magic_name__ = {"""none""": objects} # Let's continue with backend-specific objects while line_index < len(A_ ): # If the line is an if is_backend_available, we grab all objects associated. __magic_name__ = find_backend(lines[line_index] ) # Check if the backend declaration is inside a try block: if _re_try.search(lines[line_index - 1] ) is None: __magic_name__ = None if backend is not None: line_index += 1 # Scroll until we hit the else block of try-except-else while _re_else.search(lines[line_index] ) is None: line_index += 1 line_index += 1 __magic_name__ = [] # Until we unindent, add backend objects to the list while len(lines[line_index] ) <= 1 or lines[line_index].startswith(""" """ * 8 ): __magic_name__ = lines[line_index] __magic_name__ = _re_import.search(A_ ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(""", """ ) ) elif line.startswith(""" """ * 12 ): objects.append(line[12:-2] ) line_index += 1 __magic_name__ = objects else: line_index += 1 return import_dict_objects, type_hint_objects def a__ ( A_, A_ ): '''simple docstring''' def find_duplicates(A_ ): return [k for k, v in collections.Counter(A_ ).items() if v > 1] if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ): return ["Both sides of the init do not have the same backends!"] __magic_name__ = [] for key in import_dict_objects.keys(): __magic_name__ = find_duplicates(import_dict_objects[key] ) if duplicate_imports: errors.append(f'''Duplicate _import_structure definitions for: {duplicate_imports}''' ) __magic_name__ = find_duplicates(type_hint_objects[key] ) if duplicate_type_hints: errors.append(f'''Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}''' ) if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ): __magic_name__ = """base imports""" if key == """none""" else f'''{key} backend''' errors.append(f'''Differences for {name}:''' ) for a in type_hint_objects[key]: if a not in import_dict_objects[key]: errors.append(f''' {a} in TYPE_HINT but not in _import_structure.''' ) for a in import_dict_objects[key]: if a not in type_hint_objects[key]: errors.append(f''' {a} in _import_structure but not in TYPE_HINT.''' ) return errors def a__ ( ): '''simple docstring''' __magic_name__ = [] for root, _, files in os.walk(A_ ): if "__init__.py" in files: __magic_name__ = os.path.join(A_, """__init__.py""" ) __magic_name__ = parse_init(A_ ) if objects is not None: __magic_name__ = analyze_results(*A_ ) if len(A_ ) > 0: __magic_name__ = f'''Problem in {fname}, both halves do not define the same objects.\n{errors[0]}''' failures.append("""\n""".join(A_ ) ) if len(A_ ) > 0: raise ValueError("""\n\n""".join(A_ ) ) def a__ ( ): '''simple docstring''' __magic_name__ = [] for path, directories, files in os.walk(A_ ): for folder in directories: # Ignore private modules if folder.startswith("""_""" ): directories.remove(A_ ) continue # Ignore leftovers from branches (empty folders apart from pycache) if len(list((Path(A_ ) / folder).glob("""*.py""" ) ) ) == 0: continue __magic_name__ = str((Path(A_ ) / folder).relative_to(A_ ) ) __magic_name__ = short_path.replace(os.path.sep, """.""" ) submodules.append(A_ ) for fname in files: if fname == "__init__.py": continue __magic_name__ = str((Path(A_ ) / fname).relative_to(A_ ) ) __magic_name__ = short_path.replace(""".py""", """""" ).replace(os.path.sep, """.""" ) if len(submodule.split(""".""" ) ) == 1: submodules.append(A_ ) return submodules __lowerCAmelCase : Dict = [ 'convert_pytorch_checkpoint_to_tf2', 'modeling_flax_pytorch_utils', ] def a__ ( ): '''simple docstring''' __magic_name__ = importlib.util.spec_from_file_location( """transformers""", os.path.join(A_, """__init__.py""" ), submodule_search_locations=[PATH_TO_TRANSFORMERS], ) __magic_name__ = spec.loader.load_module() __magic_name__ = [ module for module in get_transformers_submodules() if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys() ] if len(A_ ) > 0: __magic_name__ = """\n""".join(f'''- {module}''' for module in module_not_registered ) raise ValueError( """The following submodules are not properly registered in the main init of Transformers:\n""" f'''{list_of_modules}\n''' """Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.""" ) if __name__ == "__main__": check_all_inits() check_submodules()
76
0